}
}
-unsigned
-vl_num_buffers_desired(struct pipe_screen *screen, enum pipe_video_profile profile)
-{
- assert(screen);
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return 4;
-
- default:
- return 1;
- }
-}
-
struct pipe_video_decoder *
vl_create_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references)
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode)
{
unsigned buffer_width, buffer_height;
bool pot_buffers;
switch (u_reduce_video_profile(profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_decoder(pipe, profile, entrypoint, chroma_format, buffer_width, buffer_height, max_references);
+ return vl_create_mpeg12_decoder(pipe, profile, entrypoint, chroma_format,
+ buffer_width, buffer_height, max_references,
+ expect_chunked_decode);
default:
return NULL;
}
bool
vl_profile_supported(struct pipe_screen *screen, enum pipe_video_profile profile);
-/**
- * the desired number of buffers for optimal operation
- */
-unsigned
-vl_num_buffers_desired(struct pipe_screen *screen, enum pipe_video_profile profile);
-
/**
* standard implementation of pipe->create_video_decoder
*/
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references);
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode);
#endif /* vl_decoder_h */
buf->texels += 64 * num_blocks;
}
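+/* free a decode buffer and all of its temporary resources */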
+static void
+vl_mpeg12_destroy_buffer(void *buffer)
+{
+ struct vl_mpeg12_buffer *buf = buffer;
+
+ assert(buf);
+
+ cleanup_zscan_buffer(buf);
+ cleanup_idct_buffer(buf);
+ cleanup_mc_buffer(buf);
+ vl_vb_cleanup(&buf->vertex_stream);
+
+ FREE(buf);
+}
+
static void
vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
+ unsigned i;
assert(decoder);
pipe_sampler_view_reference(&dec->zscan_normal, NULL);
pipe_sampler_view_reference(&dec->zscan_alternate, NULL);
+ for (i = 0; i < 4; ++i)
+ if (dec->dec_buffers[i])
+ vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);
+
FREE(dec);
}
-static void *
-vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
+static struct vl_mpeg12_buffer *
+vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec)
{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
struct vl_mpeg12_buffer *buffer;
assert(dec);
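+   /* check if a buffer is already associated with the current target */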
+ buffer = vl_video_buffer_get_associated_data(dec->target, &dec->base);
+ if (buffer)
+ return buffer;
+
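+   /* otherwise try the decoder's internal buffer ring */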
+ buffer = dec->dec_buffers[dec->current_buffer];
+ if (buffer)
+ return buffer;
+
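+   /* no buffer found, allocate and initialize a new one */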
buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
if (buffer == NULL)
return NULL;
goto error_zscan;
if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
- vl_mpg12_bs_init(&buffer->bs, decoder);
+ vl_mpg12_bs_init(&buffer->bs, &dec->base);
+
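+   /* keep the buffer for reuse: attach it to the target when decoding
+    * in chunks, otherwise store it in the internal ring */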
+ if (dec->expect_chunked_decode)
+ vl_video_buffer_set_associated_data(dec->target, &dec->base,
+ buffer, vl_mpeg12_destroy_buffer);
+ else
+ dec->dec_buffers[dec->current_buffer] = buffer;
return buffer;
error_zscan:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- cleanup_idct_buffer(buffer);
+ cleanup_idct_buffer(buffer);
error_idct:
cleanup_mc_buffer(buffer);
return NULL;
}
-static void
-vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
- struct vl_mpeg12_buffer *buf = buffer;
-
- assert(dec && buf);
-
- cleanup_zscan_buffer(buf);
-
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- cleanup_idct_buffer(buf);
-
- cleanup_mc_buffer(buf);
-
- vl_vb_cleanup(&buf->vertex_stream);
-
- FREE(buf);
-}
-
-static void
-vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
-
- assert(dec && buffer);
-
- dec->current_buffer = buffer;
-}
-
static void
vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
struct pipe_picture_desc *picture)
assert(dec);
+ dec->target = target;
surfaces = target->get_surfaces(target);
for (i = 0; i < VL_MAX_PLANES; ++i)
pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
unsigned i;
- assert(dec);
+ assert(dec && dec->target);
- buf = dec->current_buffer;
+ buf = vl_mpeg12_get_decode_buffer(dec);
assert(buf);
if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
unsigned i, j, mv_weights[2];
- assert(dec && dec->current_buffer);
+ assert(dec && dec->target);
assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
- buf = dec->current_buffer;
+ buf = vl_mpeg12_get_decode_buffer(dec);
assert(buf);
for (; num_macroblocks > 0; --num_macroblocks) {
unsigned i;
- assert(dec && dec->current_buffer);
+ assert(dec && dec->target);
- buf = dec->current_buffer;
+ buf = vl_mpeg12_get_decode_buffer(dec);
assert(buf);
for (i = 0; i < VL_MAX_PLANES; ++i)
unsigned i, j, component;
unsigned nr_components;
- assert(dec && dec->current_buffer);
+ assert(dec && dec->target);
- buf = dec->current_buffer;
+ buf = vl_mpeg12_get_decode_buffer(dec);
vl_vb_unmap(&buf->vertex_stream, dec->base.context);
vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[component]);
}
}
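+   /* advance to the next internal decode buffer */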
+ ++dec->current_buffer;
+ dec->current_buffer %= 4;
}
static void
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references)
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode)
{
const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT;
const struct format_config *format_config;
dec->base.max_references = max_references;
dec->base.destroy = vl_mpeg12_destroy;
- dec->base.create_buffer = vl_mpeg12_create_buffer;
- dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
- dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
dec->base.set_decode_target = vl_mpeg12_set_decode_target;
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ dec->expect_chunked_decode = expect_chunked_decode;
/* TODO: Implement 422, 444 */
assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
unsigned blocks_per_line;
unsigned num_blocks;
unsigned width_in_macroblocks;
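+   /* true when pictures are decoded in multiple chunks (e.g. XvMC) */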
+ bool expect_chunked_decode;
enum pipe_format zscan_source_format;
void *dsa;
- struct vl_mpeg12_buffer *current_buffer;
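+   /* ring of internal decode buffers, used when not decoding in chunks */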
+ unsigned current_buffer;
+ struct vl_mpeg12_buffer *dec_buffers[4];
+
struct pipe_mpeg12_picture_desc picture_desc;
uint8_t intra_matrix[64];
uint8_t non_intra_matrix[64];
struct pipe_sampler_view *ref_frames[VL_MAX_REF_FRAMES][VL_MAX_PLANES];
+
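+   /* current decode target, set by set_decode_target */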
+ struct pipe_video_buffer *target;
struct pipe_surface *target_surfaces[VL_MAX_PLANES];
};
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references);
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode);
#endif /* vl_mpeg12_decoder_h */
return 1 << (max_2d_texture_level-1);
}
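+/* the video buffer takes ownership of the data: it is destroyed when
+ * replaced and when the buffer itself is destroyed */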
+void
+vl_video_buffer_set_associated_data(struct pipe_video_buffer *vbuf,
+ struct pipe_video_decoder *vdec,
+ void *associated_data,
+ void (*destroy_associated_data)(void *))
+{
+ vbuf->decoder = vdec;
+
+ if (vbuf->associated_data == associated_data)
+ return;
+
+ if (vbuf->associated_data)
+ vbuf->destroy_associated_data(vbuf->associated_data);
+
+ vbuf->associated_data = associated_data;
+ vbuf->destroy_associated_data = destroy_associated_data;
+}
+
+void *
+vl_video_buffer_get_associated_data(struct pipe_video_buffer *vbuf,
+ struct pipe_video_decoder *vdec)
+{
+ if (vbuf->decoder == vdec)
+ return vbuf->associated_data;
+ else
+ return NULL;
+}
+
static void
vl_video_buffer_destroy(struct pipe_video_buffer *buffer)
{
pipe_sampler_view_reference(&buf->sampler_view_components[i], NULL);
pipe_resource_reference(&buf->resources[i], NULL);
}
+ vl_video_buffer_set_associated_data(buffer, NULL, NULL, NULL);
FREE(buffer);
}
enum pipe_format format,
enum pipe_video_profile profile);
+/**
+ * set the associated data for the given video buffer
+ */
+void
+vl_video_buffer_set_associated_data(struct pipe_video_buffer *vbuf,
+ struct pipe_video_decoder *vdec,
+ void *associated_data,
+ void (*destroy_associated_data)(void *));
+
+/**
+ * get the associated data for the given video buffer
+ */
+void *
+vl_video_buffer_get_associated_data(struct pipe_video_buffer *vbuf,
+ struct pipe_video_decoder *vdec);
+
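+/*
+ * Typical usage, sketched with hypothetical caller code (struct my_buf and
+ * my_buf_destroy are illustrative only, not part of this interface):
+ *
+ *    struct my_buf *buf = vl_video_buffer_get_associated_data(target, dec);
+ *    if (!buf) {
+ *       buf = CALLOC_STRUCT(my_buf);
+ *       vl_video_buffer_set_associated_data(target, dec, buf, my_buf_destroy);
+ *    }
+ */
+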
/**
 * creates a video buffer, can be used as a standard implementation of pipe->create_video_buffer
*/
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references)
+ unsigned width, unsigned height,
+ unsigned max_references, bool expect_chunked_decode)
{
struct nouveau_channel *chan = screen->channel;
struct nouveau_grobj *mpeg = NULL;
vl:
debug_printf("Using g3dvl renderer\n");
return vl_create_decoder(context, profile, entrypoint,
- chroma_format, width, height, max_references);
+ chroma_format, width, height,
+ max_references, expect_chunked_decode);
}
static struct pipe_sampler_view **
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(pscreen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(pscreen, profile);
default:
debug_printf("unknown video param: %d\n", param);
return 0;
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references)
+ unsigned width, unsigned height,
+ unsigned max_references, bool expect_chunked_decode)
{
struct nouveau_screen *screen = &nvfx_context(context)->screen->base;
return nouveau_create_decoder(context, screen, profile, entrypoint,
- chroma_format, width, height, max_references);
+ chroma_format, width, height,
+ max_references, expect_chunked_decode);
}
static struct pipe_video_buffer *
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references)
+ unsigned width, unsigned height,
+ unsigned max_references, bool expect_chunked_decode)
{
struct nouveau_screen *screen = nouveau_context(context)->screen;
return nouveau_create_decoder(context, screen, profile, entrypoint,
- chroma_format, width, height, max_references);
+ chroma_format, width, height,
+ max_references, expect_chunked_decode);
}
static struct pipe_video_buffer *
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, unsigned max_references );
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode);
/**
* Creates a video buffer as decoding target
*/
void (*destroy)(struct pipe_video_decoder *decoder);
- /**
- * Creates a decoder buffer
- */
- void *(*create_buffer)(struct pipe_video_decoder *decoder);
-
- /**
- * Destroys a decoder buffer
- */
- void (*destroy_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
- /**
- * set the current decoder buffer
- */
- void (*set_decode_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
/**
* set the picture parameters for the next frame
* only used for bitstream decoding
 * get the individual surfaces for each plane
*/
struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
+
+   /**
+    * auxiliary associated data
+    */
+ void *associated_data;
+
+   /**
+    * decoder where the associated data came from
+    */
+ struct pipe_video_decoder *decoder;
+
+   /**
+    * destroy the associated data
+    */
+ void (*destroy_associated_data)(void *associated_data);
};
#ifdef __cplusplus
PIPE_VIDEO_CAP_SUPPORTED = 0,
PIPE_VIDEO_CAP_NPOT_TEXTURES = 1,
PIPE_VIDEO_CAP_MAX_WIDTH = 2,
- PIPE_VIDEO_CAP_MAX_HEIGHT = 3,
- PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED = 4
+ PIPE_VIDEO_CAP_MAX_HEIGHT = 3
};
enum pipe_video_codec
vlVdpDevice *dev;
vlVdpDecoder *vldecoder;
VdpStatus ret;
- unsigned i;
bool supported;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating decoder\n");
pipe, p_profile,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CHROMA_FORMAT_420,
- width, height, max_references
+ width, height, max_references,
+ false
);
if (!vldecoder->decoder) {
goto error_decoder;
}
- vldecoder->num_buffers = pipe->screen->get_video_param
- (
- pipe->screen, p_profile,
- PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED
- );
- vldecoder->cur_buffer = 0;
-
- vldecoder->buffers = CALLOC(vldecoder->num_buffers, sizeof(void*));
- if (!vldecoder->buffers)
- goto error_alloc_buffers;
-
- for (i = 0; i < vldecoder->num_buffers; ++i) {
- vldecoder->buffers[i] = vldecoder->decoder->create_buffer(vldecoder->decoder);
- if (!vldecoder->buffers[i]) {
- ret = VDP_STATUS_ERROR;
- goto error_create_buffers;
- }
- }
-
*decoder = vlAddDataHTAB(vldecoder);
if (*decoder == 0) {
ret = VDP_STATUS_ERROR;
return VDP_STATUS_OK;
error_handle:
-error_create_buffers:
-
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
-error_alloc_buffers:
vldecoder->decoder->destroy(vldecoder->decoder);
vlVdpDecoderDestroy(VdpDecoder decoder)
{
vlVdpDecoder *vldecoder;
- unsigned i;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Destroying decoder\n");
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
vldecoder->decoder->destroy(vldecoder->decoder);
FREE(vldecoder);
// TODO: Recreate decoder with correct chroma
return VDP_STATUS_INVALID_CHROMA_TYPE;
- ++vldecoder->cur_buffer;
- vldecoder->cur_buffer %= vldecoder->num_buffers;
-
- dec->set_decode_buffer(dec, vldecoder->buffers[vldecoder->cur_buffer]);
dec->set_decode_target(dec, vlsurf->video_buffer);
switch (u_reduce_video_profile(dec->profile)) {
{
vlVdpDevice *device;
struct pipe_video_decoder *decoder;
- unsigned num_buffers;
- void **buffers;
- unsigned cur_buffer;
} vlVdpDecoder;
typedef uint32_t vlHandle;
ProfileToPipe(mc_type),
(mc_type & XVMC_IDCT) ? PIPE_VIDEO_ENTRYPOINT_IDCT : PIPE_VIDEO_ENTRYPOINT_MC,
FormatToPipe(chroma_format),
- width, height, 2
+ width, height, 2,
+ true
);
if (!context_priv->decoder) {
context_priv = surface->context->privData;
decoder = context_priv->decoder;
- if (surface->decode_buffer)
- decoder->set_decode_buffer(decoder, surface->decode_buffer);
decoder->set_decode_target(decoder, surface->video_buffer);
for (i = 0; i < 2; ++i) {
if (!surface_priv)
return BadAlloc;
- if (context_priv->decoder->create_buffer)
- surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
surface_priv->video_buffer = pipe->create_video_buffer
(
pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format,
SetDecoderStatus(surface_priv);
context_priv->decoder->end_frame(context_priv->decoder);
}
- if (surface_priv->decode_buffer)
- context_priv->decoder->destroy_buffer(context_priv->decoder, surface_priv->decode_buffer);
surface_priv->video_buffer->destroy(surface_priv->video_buffer);
FREE(surface_priv);
surface->privData = NULL;
typedef struct
{
- void *decode_buffer;
struct pipe_video_buffer *video_buffer;
/* nonzero if this picture is already being decoded */