int t;
for (t = 0; t < 2; ++t) {
int motion_code;
- int r_size = bs->desc.f_code[s][t];
+ int r_size = bs->desc->f_code[s][t];
vl_vlc_fillbits(&bs->vlc);
motion_code = vl_vlc_get_vlclbf(&bs->vlc, tbl_B10, 11);
if (mb->macroblock_modes.bits.frame_motion_type == PIPE_MPEG12_MO_TYPE_FIELD) {
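/* field prediction in a frame picture: the vertical predictors are halved before use and doubled again afterwards */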
mb->motion_vertical_field_select |= vl_vlc_get_uimsbf(&bs->vlc, 1) << s;
motion_vector(bs, 0, s, dmv, delta, dmvector);
- mb->PMV[0][s][0] = wrap(mb->PMV[0][s][0] + delta[0], bs->desc.f_code[s][0]);
- mb->PMV[0][s][1] = wrap(DIV2DOWN(mb->PMV[0][s][1]) + delta[1], bs->desc.f_code[s][1]) * 2;
+ mb->PMV[0][s][0] = wrap(mb->PMV[0][s][0] + delta[0], bs->desc->f_code[s][0]);
+ mb->PMV[0][s][1] = wrap(DIV2DOWN(mb->PMV[0][s][1]) + delta[1], bs->desc->f_code[s][1]) * 2;
mb->motion_vertical_field_select |= vl_vlc_get_uimsbf(&bs->vlc, 1) << (s + 2);
motion_vector(bs, 1, s, dmv, delta, dmvector);
- mb->PMV[1][s][0] = wrap(mb->PMV[1][s][0] + delta[0], bs->desc.f_code[s][0]);
- mb->PMV[1][s][1] = wrap(DIV2DOWN(mb->PMV[1][s][1]) + delta[1], bs->desc.f_code[s][1]) * 2;
+ mb->PMV[1][s][0] = wrap(mb->PMV[1][s][0] + delta[0], bs->desc->f_code[s][0]);
+ mb->PMV[1][s][1] = wrap(DIV2DOWN(mb->PMV[1][s][1]) + delta[1], bs->desc->f_code[s][1]) * 2;
} else {
motion_vector(bs, 0, s, dmv, delta, dmvector);
- mb->PMV[0][s][0] = wrap(mb->PMV[0][s][0] + delta[0], bs->desc.f_code[s][0]);
- mb->PMV[0][s][1] = wrap(mb->PMV[0][s][1] + delta[1], bs->desc.f_code[s][1]);
+ mb->PMV[0][s][0] = wrap(mb->PMV[0][s][0] + delta[0], bs->desc->f_code[s][0]);
+ mb->PMV[0][s][1] = wrap(mb->PMV[0][s][1] + delta[1], bs->desc->f_code[s][1]);
}
}
}
static INLINE void
-decode_slice(struct vl_mpg12_bs *bs)
+decode_slice(struct vl_mpg12_bs *bs, struct pipe_video_buffer *target)
{
struct pipe_mpeg12_macroblock mb;
short dct_blocks[64*6];
reset_predictor(bs);
vl_vlc_fillbits(&bs->vlc);
- dct_scale = quant_scale[bs->desc.q_scale_type][vl_vlc_get_uimsbf(&bs->vlc, 5)];
+ dct_scale = quant_scale[bs->desc->q_scale_type][vl_vlc_get_uimsbf(&bs->vlc, 5)];
if (vl_vlc_get_uimsbf(&bs->vlc, 1))
while (vl_vlc_get_uimsbf(&bs->vlc, 9) & 1)
inc += vl_vlc_get_vlclbf(&bs->vlc, tbl_B1, 11);
if (x != -1) {
mb.num_skipped_macroblocks = inc - 1;
- bs->decoder->decode_macroblock(bs->decoder, &mb.base, 1);
+ bs->decoder->decode_macroblock(bs->decoder, target, &bs->desc->base, &mb.base, 1);
}
mb.x = x += inc;
- switch (bs->desc.picture_coding_type) {
+ switch (bs->desc->picture_coding_type) {
case PIPE_MPEG12_PICTURE_CODING_TYPE_I:
mb.macroblock_type = vl_vlc_get_vlclbf(&bs->vlc, tbl_B2, 2);
break;
mb.macroblock_modes.value = 0;
if (mb.macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
- if (bs->desc.picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME) {
- if (bs->desc.frame_pred_frame_dct == 0)
+ if (bs->desc->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME) {
+ if (bs->desc->frame_pred_frame_dct == 0)
mb.macroblock_modes.bits.frame_motion_type = vl_vlc_get_uimsbf(&bs->vlc, 2);
else
mb.macroblock_modes.bits.frame_motion_type = 2;
} else
mb.macroblock_modes.bits.field_motion_type = vl_vlc_get_uimsbf(&bs->vlc, 2);
- } else if ((mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) && bs->desc.concealment_motion_vectors) {
- if (bs->desc.picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
+ } else if ((mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) && bs->desc->concealment_motion_vectors) {
+ if (bs->desc->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
mb.macroblock_modes.bits.frame_motion_type = 2;
else
mb.macroblock_modes.bits.field_motion_type = 1;
}
- if (bs->desc.picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME &&
- bs->desc.frame_pred_frame_dct == 0 &&
+ if (bs->desc->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME &&
+ bs->desc->frame_pred_frame_dct == 0 &&
mb.macroblock_type & (PIPE_MPEG12_MB_TYPE_INTRA | PIPE_MPEG12_MB_TYPE_PATTERN))
mb.macroblock_modes.bits.dct_type = vl_vlc_get_uimsbf(&bs->vlc, 1);
if (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_QUANT)
- dct_scale = quant_scale[bs->desc.q_scale_type][vl_vlc_get_uimsbf(&bs->vlc, 5)];
+ dct_scale = quant_scale[bs->desc->q_scale_type][vl_vlc_get_uimsbf(&bs->vlc, 5)];
- if (inc > 1 && bs->desc.picture_coding_type == PIPE_MPEG12_PICTURE_CODING_TYPE_P)
+ if (inc > 1 && bs->desc->picture_coding_type == PIPE_MPEG12_PICTURE_CODING_TYPE_P)
memset(mb.PMV, 0, sizeof(mb.PMV));
mb.motion_vertical_field_select = 0;
if ((mb.macroblock_type & PIPE_MPEG12_MB_TYPE_MOTION_FORWARD) ||
- (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA && bs->desc.concealment_motion_vectors)) {
- if (bs->desc.picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
+ (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA && bs->desc->concealment_motion_vectors)) {
+ if (bs->desc->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
motion_vector_frame(bs, 0, &mb);
else
motion_vector_field(bs, 0, &mb);
}
if (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD) {
- if (bs->desc.picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
+ if (bs->desc->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FRAME)
motion_vector_frame(bs, 1, &mb);
else
motion_vector_field(bs, 1, &mb);
}
- if (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA && bs->desc.concealment_motion_vectors) {
+ if (mb.macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA && bs->desc->concealment_motion_vectors) {
unsigned extra = vl_vlc_get_uimsbf(&bs->vlc, 1);
mb.PMV[1][0][0] = mb.PMV[0][0][0];
mb.PMV[1][0][1] = mb.PMV[0][0][1];
} while (vl_vlc_bits_left(&bs->vlc) && vl_vlc_peekbits(&bs->vlc, 23));
mb.num_skipped_macroblocks = 0;
- bs->decoder->decode_macroblock(bs->decoder, &mb.base, 1);
+ bs->decoder->decode_macroblock(bs->decoder, target, &bs->desc->base, &mb.base, 1);
}
void
}
void
-vl_mpg12_bs_set_picture_desc(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc *picture)
-{
- bs->desc = *picture;
- bs->intra_dct_tbl = picture->intra_vlc_format ? tbl_B15 : tbl_B14_AC;
-}
-
-void
-vl_mpg12_bs_decode(struct vl_mpg12_bs *bs, unsigned num_buffers,
- const void * const *buffers, const unsigned *sizes)
+vl_mpg12_bs_decode(struct vl_mpg12_bs *bs,
+ struct pipe_video_buffer *target,
+ struct pipe_mpeg12_picture_desc *picture,
+ unsigned num_buffers,
+ const void * const *buffers,
+ const unsigned *sizes)
{
assert(bs);
+ bs->desc = picture;
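+ /* intra_vlc_format selects between the two AC coefficient VLC tables (B.14 and B.15) */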
+ bs->intra_dct_tbl = picture->intra_vlc_format ? tbl_B15 : tbl_B14_AC;
+
vl_vlc_init(&bs->vlc, num_buffers, buffers, sizes);
while (vl_vlc_bits_left(&bs->vlc) > 32) {
uint32_t code = vl_vlc_peekbits(&bs->vlc, 32);
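/* 0x00000101 .. 0x000001AF are the MPEG slice start codes */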
if (code >= 0x101 && code <= 0x1AF) {
vl_vlc_eatbits(&bs->vlc, 24);
- decode_slice(bs);
+ decode_slice(bs, target);
/* align to a byte again */
vl_vlc_eatbits(&bs->vlc, vl_vlc_valid_bits(&bs->vlc) & 7);
{
struct pipe_video_decoder *decoder;
- struct pipe_mpeg12_picture_desc desc;
+ struct pipe_mpeg12_picture_desc *desc;
struct dct_coeff *intra_dct_tbl;
struct vl_vlc vlc;
vl_mpg12_bs_init(struct vl_mpg12_bs *bs, struct pipe_video_decoder *decoder);
void
-vl_mpg12_bs_set_picture_desc(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc *picture);
-
-void
-vl_mpg12_bs_decode(struct vl_mpg12_bs *bs, unsigned num_buffers,
- const void * const *buffers, const unsigned *sizes);
+vl_mpg12_bs_decode(struct vl_mpg12_bs *bs,
+ struct pipe_video_buffer *target,
+ struct pipe_mpeg12_picture_desc *picture,
+ unsigned num_buffers,
+ const void * const *buffers,
+ const unsigned *sizes);
#endif /* vl_mpeg12_bitstream_h */
}
static struct vl_mpeg12_buffer *
-vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec)
+vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
{
struct vl_mpeg12_buffer *buffer;
assert(dec);
- buffer = vl_video_buffer_get_associated_data(dec->target, &dec->base);
+ buffer = vl_video_buffer_get_associated_data(target, &dec->base);
if (buffer)
return buffer;
vl_mpg12_bs_init(&buffer->bs, &dec->base);
if (dec->expect_chunked_decode)
- vl_video_buffer_set_associated_data(dec->target, &dec->base,
+ vl_video_buffer_set_associated_data(target, &dec->base,
buffer, vl_mpeg12_destroy_buffer);
else
dec->dec_buffers[dec->current_buffer] = buffer;
}
static void
-vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
- struct pipe_picture_desc *picture)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture;
-
- assert(dec && pic);
-
- dec->picture_desc = *pic;
-}
-
-static void
-vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder,
- const struct pipe_quant_matrix *matrix)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- const struct pipe_mpeg12_quant_matrix *m = (const struct pipe_mpeg12_quant_matrix *)matrix;
-
- assert(dec);
- assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12);
-
- memcpy(dec->intra_matrix, m->intra_matrix, 64);
- memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64);
-}
-
-static void
-vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *target)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct pipe_surface **surfaces;
- unsigned i;
-
- assert(dec);
-
- dec->target = target;
- surfaces = target->get_surfaces(target);
- for (i = 0; i < VL_MAX_PLANES; ++i)
- pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
-}
-
-static void
-vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **ref_frames,
- unsigned num_ref_frames)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct pipe_sampler_view **sv;
- unsigned i,j;
-
- assert(dec);
- assert(num_ref_frames <= VL_MAX_REF_FRAMES);
-
- for (i = 0; i < num_ref_frames; ++i) {
- sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]);
- for (j = 0; j < VL_MAX_PLANES; ++j)
- pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]);
- }
-
- for (; i < VL_MAX_REF_FRAMES; ++i)
- for (j = 0; j < VL_MAX_PLANES; ++j)
- pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL);
-}
-
-static void
-vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
+vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
struct vl_mpeg12_buffer *buf;
struct pipe_resource *tex;
struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
+ uint8_t intra_matrix[64];
+ uint8_t non_intra_matrix[64];
+
unsigned i;
- assert(dec && dec->target);
+ assert(dec && target && picture);
- buf = vl_mpeg12_get_decode_buffer(dec);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
assert(buf);
- if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
- dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);
+ if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+ memcpy(intra_matrix, desc->intra_matrix, sizeof(intra_matrix));
+ memcpy(non_intra_matrix, desc->non_intra_matrix, sizeof(non_intra_matrix));
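+ /* the DC coefficient of intra blocks is scaled by intra_dc_precision instead of the matrix entry */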
+ intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
+ } else {
+ memset(intra_matrix, 0x10, sizeof(intra_matrix));
+ memset(non_intra_matrix, 0x10, sizeof(non_intra_matrix));
+ }
for (i = 0; i < VL_MAX_PLANES; ++i) {
struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
- vl_zscan_upload_quant(zscan, &buf->zscan[i], dec->intra_matrix, true);
- vl_zscan_upload_quant(zscan, &buf->zscan[i], dec->non_intra_matrix, false);
+ vl_zscan_upload_quant(zscan, &buf->zscan[i], intra_matrix, true);
+ vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
}
vl_vb_map(&buf->vertex_stream, dec->base.context);
for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
- if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
- vl_mpg12_bs_set_picture_desc(&buf->bs, &dec->picture_desc);
-
- } else {
-
+ if (dec->base.entrypoint >= PIPE_VIDEO_ENTRYPOINT_IDCT) {
for (i = 0; i < VL_MAX_PLANES; ++i)
vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
}
static void
vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *macroblocks,
unsigned num_macroblocks)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
struct vl_mpeg12_buffer *buf;
unsigned i, j, mv_weights[2];
- assert(dec && dec->target);
+ assert(dec && target && picture);
assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
- buf = vl_mpeg12_get_decode_buffer(dec);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
assert(buf);
for (; num_macroblocks > 0; --num_macroblocks) {
MacroBlockTypeToPipeWeights(mb, mv_weights);
for (i = 0; i < 2; ++i) {
- if (!dec->ref_frames[i][0]) continue;
+ if (!desc->ref[i]) continue;
buf->mv_stream[i][mb_addr] = MotionVectorToPipe
(
if (mb->num_skipped_macroblocks > 0) {
struct vl_motionvector skipped_mv[2];
- if (dec->ref_frames[0][0] && !dec->ref_frames[1][0]) {
+ if (desc->ref[0] && !desc->ref[1]) {
skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
} else {
++mb_addr;
for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
for (j = 0; j < 2; ++j) {
- if (!dec->ref_frames[j][0]) continue;
+ if (!desc->ref[j]) continue;
buf->mv_stream[j][mb_addr] = skipped_mv[j];
}
static void
vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
unsigned num_buffers,
const void * const *buffers,
const unsigned *sizes)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
struct vl_mpeg12_buffer *buf;
unsigned i;
- assert(dec && dec->target);
+ assert(dec && target && picture);
- buf = vl_mpeg12_get_decode_buffer(dec);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
assert(buf);
for (i = 0; i < VL_MAX_PLANES; ++i)
- vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
+ vl_zscan_set_layout(&buf->zscan[i], desc->alternate_scan ?
dec->zscan_alternate : dec->zscan_normal);
- vl_mpg12_bs_decode(&buf->bs, num_buffers, buffers, sizes);
+ vl_mpg12_bs_decode(&buf->bs, target, desc, num_buffers, buffers, sizes);
}
static void
-vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
+vl_mpeg12_end_frame(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+ struct pipe_sampler_view **ref_frames[2];
struct pipe_sampler_view **mc_source_sv;
+ struct pipe_surface **target_surfaces;
struct pipe_vertex_buffer vb[3];
struct vl_mpeg12_buffer *buf;
unsigned i, j, component;
unsigned nr_components;
- assert(dec && dec->target);
+ assert(dec && target && picture);
- buf = vl_mpeg12_get_decode_buffer(dec);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
vl_vb_unmap(&buf->vertex_stream, dec->base.context);
vb[0] = dec->quads;
vb[1] = dec->pos;
+ target_surfaces = target->get_surfaces(target);
+
+ for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
+ if (desc->ref[i])
+ ref_frames[i] = desc->ref[i]->get_sampler_view_planes(desc->ref[i]);
+ else
+ ref_frames[i] = NULL;
+ }
+
dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
for (i = 0; i < VL_MAX_PLANES; ++i) {
- if (!dec->target_surfaces[i]) continue;
+ if (!target_surfaces[i]) continue;
- vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);
+ vl_mc_set_surface(&buf->mc[i], target_surfaces[i]);
for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
- if (!dec->ref_frames[j][i]) continue;
+ if (!ref_frames[j] || !ref_frames[j][i]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
- vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], dec->ref_frames[j][i]);
+ vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
}
}
mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
- if (!dec->target_surfaces[i]) continue;
+ if (!target_surfaces[i]) continue;
- nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
+ nr_components = util_format_get_nr_components(target_surfaces[i]->texture->format);
for (j = 0; j < nr_components; ++j, ++component) {
if (!buf->num_ycbcr_blocks[i]) continue;
dec->base.max_references = max_references;
dec->base.destroy = vl_mpeg12_destroy;
- dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
- dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
- dec->base.set_decode_target = vl_mpeg12_set_decode_target;
- dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
dec->base.begin_frame = vl_mpeg12_begin_frame;
dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
if (!init_pipe_state(dec))
goto error_pipe_state;
- memset(dec->intra_matrix, 0x10, 64);
- memset(dec->non_intra_matrix, 0x10, 64);
-
return &dec->base;
error_pipe_state:
unsigned current_buffer;
struct vl_mpeg12_buffer *dec_buffers[4];
-
- struct pipe_mpeg12_picture_desc picture_desc;
- uint8_t intra_matrix[64];
- uint8_t non_intra_matrix[64];
- struct pipe_sampler_view *ref_frames[VL_MAX_REF_FRAMES][VL_MAX_PLANES];
-
- struct pipe_video_buffer *target;
- struct pipe_surface *target_surfaces[VL_MAX_PLANES];
};
struct vl_mpeg12_buffer
}
static void
-nouveau_decoder_set_picture_parameters(struct pipe_video_decoder *decoder,
- struct pipe_picture_desc *picture_desc)
+nouveau_decoder_begin_frame(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture)
{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- struct pipe_mpeg12_picture_desc *desc;
- desc = (struct pipe_mpeg12_picture_desc *)picture_desc;
- dec->picture_structure = desc->picture_structure;
-}
-
-static void
-nouveau_decoder_set_reference_frames(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **buffers,
- unsigned count)
-{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- if (count >= 1 && buffers[0])
- dec->past = nouveau_decoder_surface_index(dec, buffers[0]);
- if (count >= 2 && buffers[1])
- dec->future = nouveau_decoder_surface_index(dec, buffers[1]);
-}
-
-static void
-nouveau_decoder_set_decode_target(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *buffer)
-{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- dec->current = nouveau_decoder_surface_index(dec, buffer);
}
static void
nouveau_decoder_decode_macroblock(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *pipe_mb,
unsigned num_macroblocks)
{
struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc*)picture;
const struct pipe_mpeg12_macroblock *mb;
unsigned i;
+ assert(target->width == decoder->width);
+ assert(target->height == decoder->height);
+
+ dec->current = nouveau_decoder_surface_index(dec, target);
assert(dec->current < 8);
+ dec->picture_structure = desc->picture_structure;
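+ /* ref[0] is the forward (past) reference, ref[1] the backward (future) one */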
+ if (desc->ref[1])
+ dec->future = nouveau_decoder_surface_index(dec, desc->ref[1]);
+ if (desc->ref[0])
+ dec->past = nouveau_decoder_surface_index(dec, desc->ref[0]);
if (nouveau_vpe_init(dec)) return;
mb = (const struct pipe_mpeg12_macroblock *)pipe_mb;
}
}
+static void
+nouveau_decoder_end_frame(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture)
+{
+}
+
static void
nouveau_decoder_flush(struct pipe_video_decoder *decoder)
{
FREE(dec);
}
-static void
-nouveau_decoder_begin_frame(struct pipe_video_decoder *decoder)
-{
-}
-
-static void
-nouveau_decoder_end_frame(struct pipe_video_decoder *decoder)
-{
-}
-
static struct pipe_video_decoder *
nouveau_create_decoder(struct pipe_context *context,
struct nouveau_screen *screen,
dec->base.max_references = max_references;
dec->base.destroy = nouveau_decoder_destroy;
dec->base.begin_frame = nouveau_decoder_begin_frame;
- dec->base.end_frame = nouveau_decoder_end_frame;
- dec->base.set_decode_target = nouveau_decoder_set_decode_target;
- dec->base.set_picture_parameters = nouveau_decoder_set_picture_parameters;
- dec->base.set_reference_frames = nouveau_decoder_set_reference_frames;
dec->base.decode_macroblock = nouveau_decoder_decode_macroblock;
+ dec->base.end_frame = nouveau_decoder_end_frame;
dec->base.flush = nouveau_decoder_flush;
dec->screen = screen;
*/
void (*destroy)(struct pipe_video_decoder *decoder);
- /**
- * set the picture parameters for the next frame
- * only used for bitstream decoding
- */
- void (*set_picture_parameters)(struct pipe_video_decoder *decoder,
- struct pipe_picture_desc *picture);
-
- /**
- * set the quantification matrixes
- */
- void (*set_quant_matrix)(struct pipe_video_decoder *decoder,
- const struct pipe_quant_matrix *matrix);
-
- /**
- * set target where video data is decoded to
- */
- void (*set_decode_target)(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *target);
-
- /**
- * set reference frames for motion compensation
- */
- void (*set_reference_frames)(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **ref_frames,
- unsigned num_ref_frames);
-
/**
* start decoding of a new frame
*/
- void (*begin_frame)(struct pipe_video_decoder *decoder);
+ void (*begin_frame)(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture);
/**
* decode a macroblock
*/
void (*decode_macroblock)(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *macroblocks,
unsigned num_macroblocks);
* decode a bitstream
*/
void (*decode_bitstream)(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
unsigned num_buffers,
const void * const *buffers,
const unsigned *sizes);
/**
* end decoding of the current frame
*/
- void (*end_frame)(struct pipe_video_decoder *decoder);
+ void (*end_frame)(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture);
/**
* flush any outstanding command buffers to the hardware
unsigned full_pel_forward_vector;
unsigned full_pel_backward_vector;
unsigned num_slices;
-};
-
-struct pipe_mpeg12_quant_matrix
-{
- struct pipe_quant_matrix base;
const uint8_t *intra_matrix;
const uint8_t *non_intra_matrix;
+
+ struct pipe_video_buffer *ref[2];
};
struct pipe_mpeg12_macroblock
struct pipe_mpeg4_picture_desc
{
struct pipe_picture_desc base;
+
int32_t trd[2];
int32_t trb[2];
uint16_t vop_time_increment_resolution;
uint8_t rounding_control;
uint8_t alternate_vertical_scan_flag;
uint8_t top_field_first;
-};
-
-struct pipe_mpeg4_quant_matrix
-{
- struct pipe_quant_matrix base;
const uint8_t *intra_matrix;
const uint8_t *non_intra_matrix;
+
+ struct pipe_video_buffer *ref[2];
};
struct pipe_vc1_picture_desc
{
struct pipe_picture_desc base;
+
uint32_t slice_count;
uint8_t picture_type;
uint8_t frame_coding_mode;
uint8_t maxbframes;
uint8_t deblockEnable;
uint8_t pquant;
+
+ struct pipe_video_buffer *ref[2];
};
#ifdef __cplusplus
return VDP_STATUS_OK;
}
+static VdpStatus
+vlVdpGetReferenceFrame(VdpVideoSurface handle, struct pipe_video_buffer **ref_frame)
+{
+ vlVdpSurface *surface;
+
+ /* a surface handle of VDP_INVALID_HANDLE means the reference is not used */
+ if (handle == VDP_INVALID_HANDLE) {
+ *ref_frame = NULL;
+ return VDP_STATUS_OK;
+ }
+
+ surface = vlGetDataHTAB(handle);
+ if (!surface)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ *ref_frame = surface->video_buffer;
+ if (!*ref_frame)
+ return VDP_STATUS_INVALID_HANDLE;
+
+ return VDP_STATUS_OK;
+}
+
/**
* Decode a mpeg 1/2 video.
*/
static VdpStatus
-vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder,
+vlVdpDecoderRenderMpeg12(struct pipe_mpeg12_picture_desc *picture,
VdpPictureInfoMPEG1Or2 *picture_info)
{
- struct pipe_mpeg12_picture_desc picture;
- struct pipe_mpeg12_quant_matrix quant;
- struct pipe_video_buffer *ref_frames[2];
- unsigned i;
+ VdpStatus r;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG12\n");
- i = 0;
-
- /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
- if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[i])
- return VDP_STATUS_INVALID_HANDLE;
- ++i;
- }
-
- if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[i])
- return VDP_STATUS_INVALID_HANDLE;
- ++i;
- }
+ r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]);
+ if (r != VDP_STATUS_OK)
+ return r;
+
+ r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]);
+ if (r != VDP_STATUS_OK)
+ return r;
+
+ picture->picture_coding_type = picture_info->picture_coding_type;
+ picture->picture_structure = picture_info->picture_structure;
+ picture->frame_pred_frame_dct = picture_info->frame_pred_frame_dct;
+ picture->q_scale_type = picture_info->q_scale_type;
+ picture->alternate_scan = picture_info->alternate_scan;
+ picture->intra_vlc_format = picture_info->intra_vlc_format;
+ picture->concealment_motion_vectors = picture_info->concealment_motion_vectors;
+ picture->intra_dc_precision = picture_info->intra_dc_precision;
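+ /* gallium expects f_code minus one, i.e. the r_size used directly by the VLC decoder */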
+ picture->f_code[0][0] = picture_info->f_code[0][0] - 1;
+ picture->f_code[0][1] = picture_info->f_code[0][1] - 1;
+ picture->f_code[1][0] = picture_info->f_code[1][0] - 1;
+ picture->f_code[1][1] = picture_info->f_code[1][1] - 1;
+ picture->num_slices = picture_info->slice_count;
+ picture->top_field_first = picture_info->top_field_first;
+ picture->full_pel_forward_vector = picture_info->full_pel_forward_vector;
+ picture->full_pel_backward_vector = picture_info->full_pel_backward_vector;
+ picture->intra_matrix = picture_info->intra_quantizer_matrix;
+ picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix;
- decoder->set_reference_frames(decoder, ref_frames, i);
-
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
- picture.picture_coding_type = picture_info->picture_coding_type;
- picture.picture_structure = picture_info->picture_structure;
- picture.frame_pred_frame_dct = picture_info->frame_pred_frame_dct;
- picture.q_scale_type = picture_info->q_scale_type;
- picture.alternate_scan = picture_info->alternate_scan;
- picture.intra_vlc_format = picture_info->intra_vlc_format;
- picture.concealment_motion_vectors = picture_info->concealment_motion_vectors;
- picture.intra_dc_precision = picture_info->intra_dc_precision;
- picture.f_code[0][0] = picture_info->f_code[0][0] - 1;
- picture.f_code[0][1] = picture_info->f_code[0][1] - 1;
- picture.f_code[1][0] = picture_info->f_code[1][0] - 1;
- picture.f_code[1][1] = picture_info->f_code[1][1] - 1;
- picture.num_slices = picture_info->slice_count;
- picture.top_field_first = picture_info->top_field_first;
- picture.full_pel_forward_vector = picture_info->full_pel_forward_vector;
- picture.full_pel_backward_vector = picture_info->full_pel_backward_vector;
-
- decoder->set_picture_parameters(decoder, &picture.base);
-
- memset(&quant, 0, sizeof(quant));
- quant.base.codec = PIPE_VIDEO_CODEC_MPEG12;
- quant.intra_matrix = picture_info->intra_quantizer_matrix;
- quant.non_intra_matrix = picture_info->non_intra_quantizer_matrix;
-
- decoder->set_quant_matrix(decoder, &quant.base);
return VDP_STATUS_OK;
}
* Decode a mpeg 4 video.
*/
static VdpStatus
-vlVdpDecoderRenderMpeg4(struct pipe_video_decoder *decoder,
- VdpPictureInfoMPEG4Part2 *picture_info)
+vlVdpDecoderRenderMpeg4(struct pipe_mpeg4_picture_desc *picture,
+ VdpPictureInfoMPEG4Part2 *picture_info)
{
- struct pipe_mpeg4_picture_desc picture;
- struct pipe_mpeg4_quant_matrix quant;
- struct pipe_video_buffer *ref_frames[2] = {};
+ VdpStatus r;
unsigned i;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG4\n");
- /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
- if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[0] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[0])
- return VDP_STATUS_INVALID_HANDLE;
- }
+ r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]);
+ if (r != VDP_STATUS_OK)
+ return r;
- if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[1] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[1])
- return VDP_STATUS_INVALID_HANDLE;
- }
- decoder->set_reference_frames(decoder, ref_frames, 2);
+ r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]);
+ if (r != VDP_STATUS_OK)
+ return r;
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
for (i = 0; i < 2; ++i) {
- picture.trd[i] = picture_info->trd[i];
- picture.trb[i] = picture_info->trb[i];
+ picture->trd[i] = picture_info->trd[i];
+ picture->trb[i] = picture_info->trb[i];
}
- picture.vop_time_increment_resolution = picture_info->vop_time_increment_resolution;
- picture.vop_coding_type = picture_info->vop_coding_type;
- picture.vop_fcode_forward = picture_info->vop_fcode_forward;
- picture.vop_fcode_backward = picture_info->vop_fcode_backward;
- picture.resync_marker_disable = picture_info->resync_marker_disable;
- picture.interlaced = picture_info->interlaced;
- picture.quant_type = picture_info->quant_type;
- picture.quarter_sample = picture_info->quarter_sample;
- picture.short_video_header = picture_info->short_video_header;
- picture.rounding_control = picture_info->rounding_control;
- picture.alternate_vertical_scan_flag = picture_info->alternate_vertical_scan_flag;
- picture.top_field_first = picture_info->top_field_first;
- decoder->set_picture_parameters(decoder, &picture.base);
-
- memset(&quant, 0, sizeof(quant));
- quant.base.codec = PIPE_VIDEO_CODEC_MPEG4;
- quant.intra_matrix = picture_info->intra_quantizer_matrix;
- quant.non_intra_matrix = picture_info->non_intra_quantizer_matrix;
- decoder->set_quant_matrix(decoder, &quant.base);
+ picture->vop_time_increment_resolution = picture_info->vop_time_increment_resolution;
+ picture->vop_coding_type = picture_info->vop_coding_type;
+ picture->vop_fcode_forward = picture_info->vop_fcode_forward;
+ picture->vop_fcode_backward = picture_info->vop_fcode_backward;
+ picture->resync_marker_disable = picture_info->resync_marker_disable;
+ picture->interlaced = picture_info->interlaced;
+ picture->quant_type = picture_info->quant_type;
+ picture->quarter_sample = picture_info->quarter_sample;
+ picture->short_video_header = picture_info->short_video_header;
+ picture->rounding_control = picture_info->rounding_control;
+ picture->alternate_vertical_scan_flag = picture_info->alternate_vertical_scan_flag;
+ picture->top_field_first = picture_info->top_field_first;
+ picture->intra_matrix = picture_info->intra_quantizer_matrix;
+ picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix;
+
return VDP_STATUS_OK;
}
static VdpStatus
-vlVdpDecoderRenderVC1(struct pipe_video_decoder *decoder,
+vlVdpDecoderRenderVC1(struct pipe_vc1_picture_desc *picture,
VdpPictureInfoVC1 *picture_info)
{
- struct pipe_vc1_picture_desc picture;
- struct pipe_video_buffer *ref_frames[2] = {};
+ VdpStatus r;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding VC-1\n");
- /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
- if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[0] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[0])
- return VDP_STATUS_INVALID_HANDLE;
- }
+ r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]);
+ if (r != VDP_STATUS_OK)
+ return r;
+
+ r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]);
+ if (r != VDP_STATUS_OK)
+ return r;
+
+ picture->slice_count = picture_info->slice_count;
+ picture->picture_type = picture_info->picture_type;
+ picture->frame_coding_mode = picture_info->frame_coding_mode;
+ picture->postprocflag = picture_info->postprocflag;
+ picture->pulldown = picture_info->pulldown;
+ picture->interlace = picture_info->interlace;
+ picture->tfcntrflag = picture_info->tfcntrflag;
+ picture->finterpflag = picture_info->finterpflag;
+ picture->psf = picture_info->psf;
+ picture->dquant = picture_info->dquant;
+ picture->panscan_flag = picture_info->panscan_flag;
+ picture->refdist_flag = picture_info->refdist_flag;
+ picture->quantizer = picture_info->quantizer;
+ picture->extended_mv = picture_info->extended_mv;
+ picture->extended_dmv = picture_info->extended_dmv;
+ picture->overlap = picture_info->overlap;
+ picture->vstransform = picture_info->vstransform;
+ picture->loopfilter = picture_info->loopfilter;
+ picture->fastuvmc = picture_info->fastuvmc;
+ picture->range_mapy_flag = picture_info->range_mapy_flag;
+ picture->range_mapy = picture_info->range_mapy;
+ picture->range_mapuv_flag = picture_info->range_mapuv_flag;
+ picture->range_mapuv = picture_info->range_mapuv;
+ picture->multires = picture_info->multires;
+ picture->syncmarker = picture_info->syncmarker;
+ picture->rangered = picture_info->rangered;
+ picture->maxbframes = picture_info->maxbframes;
+ picture->deblockEnable = picture_info->deblockEnable;
+ picture->pquant = picture_info->pquant;
- if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[1] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[1])
- return VDP_STATUS_INVALID_HANDLE;
- }
- decoder->set_reference_frames(decoder, ref_frames, 2);
-
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
- picture.slice_count = picture_info->slice_count;
- picture.picture_type = picture_info->picture_type;
- picture.frame_coding_mode = picture_info->frame_coding_mode;
- picture.postprocflag = picture_info->postprocflag;
- picture.pulldown = picture_info->pulldown;
- picture.interlace = picture_info->interlace;
- picture.tfcntrflag = picture_info->tfcntrflag;
- picture.finterpflag = picture_info->finterpflag;
- picture.psf = picture_info->psf;
- picture.dquant = picture_info->dquant;
- picture.panscan_flag = picture_info->panscan_flag;
- picture.refdist_flag = picture_info->refdist_flag;
- picture.quantizer = picture_info->quantizer;
- picture.extended_mv = picture_info->extended_mv;
- picture.extended_dmv = picture_info->extended_dmv;
- picture.overlap = picture_info->overlap;
- picture.vstransform = picture_info->vstransform;
- picture.loopfilter = picture_info->loopfilter;
- picture.fastuvmc = picture_info->fastuvmc;
- picture.range_mapy_flag = picture_info->range_mapy_flag;
- picture.range_mapy = picture_info->range_mapy;
- picture.range_mapuv_flag = picture_info->range_mapuv_flag;
- picture.range_mapuv = picture_info->range_mapuv;
- picture.multires = picture_info->multires;
- picture.syncmarker = picture_info->syncmarker;
- picture.rangered = picture_info->rangered;
- picture.maxbframes = picture_info->maxbframes;
- picture.deblockEnable = picture_info->deblockEnable;
- picture.pquant = picture_info->pquant;
- decoder->set_picture_parameters(decoder, &picture.base);
return VDP_STATUS_OK;
}
VdpStatus ret;
struct pipe_video_decoder *dec;
unsigned i;
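+ /* stack allocated picture description, large enough for any of the supported codecs */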
+ union {
+ struct pipe_picture_desc base;
+ struct pipe_mpeg12_picture_desc mpeg12;
+ struct pipe_mpeg4_picture_desc mpeg4;
+ struct pipe_vc1_picture_desc vc1;
+ } desc;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding\n");
// TODO: Recreate decoder with correct chroma
return VDP_STATUS_INVALID_CHROMA_TYPE;
- dec->set_decode_target(dec, vlsurf->video_buffer);
-
+ memset(&desc, 0, sizeof(desc));
+ desc.base.profile = dec->profile;
switch (u_reduce_video_profile(dec->profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
- ret = vlVdpDecoderRenderMpeg12(dec, (VdpPictureInfoMPEG1Or2 *)picture_info);
+ ret = vlVdpDecoderRenderMpeg12(&desc.mpeg12, (VdpPictureInfoMPEG1Or2 *)picture_info);
break;
case PIPE_VIDEO_CODEC_MPEG4:
- ret = vlVdpDecoderRenderMpeg4(dec, (VdpPictureInfoMPEG4Part2 *)picture_info);
+ ret = vlVdpDecoderRenderMpeg4(&desc.mpeg4, (VdpPictureInfoMPEG4Part2 *)picture_info);
break;
case PIPE_VIDEO_CODEC_VC1:
- ret = vlVdpDecoderRenderVC1(dec, (VdpPictureInfoVC1 *)picture_info);
+ ret = vlVdpDecoderRenderVC1(&desc.vc1, (VdpPictureInfoVC1 *)picture_info);
break;
default:
return VDP_STATUS_INVALID_DECODER_PROFILE;
if (ret != VDP_STATUS_OK)
return ret;
- dec->begin_frame(dec);
for (i = 0; i < bitstream_buffer_count; ++i) {
buffers[i] = bitstream_buffers[i].bitstream;
sizes[i] = bitstream_buffers[i].bitstream_bytes;
}
- dec->decode_bitstream(dec, bitstream_buffer_count, buffers, sizes);
- dec->end_frame(dec);
+
+ dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
+ dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
+ dec->end_frame(dec, vlsurf->video_buffer, &desc.base);
return ret;
}
}
static void
-SetDecoderStatus(XvMCSurfacePrivate *surface)
+GetPictureDescription(XvMCSurfacePrivate *surface, struct pipe_mpeg12_picture_desc *desc)
{
- struct pipe_video_decoder *decoder;
- struct pipe_video_buffer *ref_frames[2];
- struct pipe_mpeg12_picture_desc desc = { { PIPE_VIDEO_PROFILE_MPEG1} };
-
- XvMCContextPrivate *context_priv;
-
unsigned i, num_refs = 0;
- desc.picture_structure = surface->picture_structure;
-
- assert(surface);
-
- context_priv = surface->context->privData;
- decoder = context_priv->decoder;
-
- decoder->set_decode_target(decoder, surface->video_buffer);
+ assert(surface && desc);
+ memset(desc, 0, sizeof(*desc));
+ desc->base.profile = PIPE_VIDEO_PROFILE_MPEG1;
+ desc->picture_structure = surface->picture_structure;
for (i = 0; i < 2; ++i) {
if (surface->ref[i]) {
XvMCSurfacePrivate *ref = surface->ref[i]->privData;
if (ref)
- ref_frames[num_refs++] = ref->video_buffer;
+ desc->ref[num_refs++] = ref->video_buffer;
}
}
- decoder->set_reference_frames(decoder, ref_frames, num_refs);
- decoder->set_picture_parameters(context_priv->decoder, &desc.base);
}
static void
}
if (surface->picture_structure) {
- SetDecoderStatus(surface);
+ struct pipe_mpeg12_picture_desc desc;
+ GetPictureDescription(surface, &desc);
surface->picture_structure = 0;
for (i = 0; i < 2; ++i)
surface->ref[i] = NULL;
- context_priv->decoder->end_frame(context_priv->decoder);
+ context_priv->decoder->end_frame(context_priv->decoder, surface->video_buffer, &desc.base);
}
}
{
struct pipe_mpeg12_macroblock mb[num_macroblocks];
struct pipe_video_decoder *decoder;
+ struct pipe_mpeg12_picture_desc desc;
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
target_surface_priv->ref[1] = future_surface;
if (target_surface_priv->picture_structure)
- SetDecoderStatus(target_surface_priv);
+ GetPictureDescription(target_surface_priv, &desc);
else {
target_surface_priv->picture_structure = picture_structure;
- SetDecoderStatus(target_surface_priv);
- decoder->begin_frame(decoder);
+ GetPictureDescription(target_surface_priv, &desc);
+ decoder->begin_frame(decoder, target_surface_priv->video_buffer, &desc.base);
}
MacroBlocksToPipe(context_priv, target_surface_priv, picture_structure,
xvmc_mb, blocks, mb, num_macroblocks);
- context_priv->decoder->decode_macroblock(context_priv->decoder, &mb[0].base, num_macroblocks);
+ context_priv->decoder->decode_macroblock(context_priv->decoder,
+ target_surface_priv->video_buffer,
+ &desc.base,
+ &mb[0].base, num_macroblocks);
XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
context_priv = surface_priv->context->privData;
if (surface_priv->picture_structure) {
- SetDecoderStatus(surface_priv);
- context_priv->decoder->end_frame(context_priv->decoder);
+ struct pipe_mpeg12_picture_desc desc;
+ GetPictureDescription(surface_priv, &desc);
+ context_priv->decoder->end_frame(context_priv->decoder, surface_priv->video_buffer, &desc.base);
}
surface_priv->video_buffer->destroy(surface_priv->video_buffer);
FREE(surface_priv);