#include <math.h>
#include <assert.h>
-#include <util/u_memory.h>
-#include <util/u_rect.h>
-#include <util/u_video.h>
+#include "util/u_memory.h"
+#include "util/u_rect.h"
+#include "util/u_sampler.h"
+#include "util/u_video.h"
#include "vl_mpeg12_decoder.h"
#include "vl_defines.h"
-#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
+#define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
+#define SCALE_FACTOR_SSCALED (1.0f / 256.0f)
+
+struct format_config {
+ enum pipe_format zscan_source_format;
+ enum pipe_format idct_source_format;
+ enum pipe_format mc_source_format;
+
+ float idct_scale;
+ float mc_scale;
+};
+
+static const struct format_config bitstream_format_config[] = {
+// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
+// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
+ { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
+ { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
+};
+
+static const unsigned num_bitstream_format_configs =
+ sizeof(bitstream_format_config) / sizeof(struct format_config);
+
+static const struct format_config idct_format_config[] = {
+// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
+// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
+ { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
+ { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
+};
+
+static const unsigned num_idct_format_configs =
+ sizeof(idct_format_config) / sizeof(struct format_config);
+
+static const struct format_config mc_format_config[] = {
+ //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
+ { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
+};
+
+static const unsigned num_mc_format_configs =
+ sizeof(mc_format_config) / sizeof(struct format_config);
static const unsigned const_empty_block_mask_420[3][2][2] = {
- { { 0x20, 0x10 }, { 0x08, 0x04 } },
- { { 0x02, 0x02 }, { 0x02, 0x02 } },
- { { 0x01, 0x01 }, { 0x01, 0x01 } }
+ { { 0x20, 0x10 }, { 0x08, 0x04 } },
+ { { 0x02, 0x02 }, { 0x02, 0x02 } },
+ { { 0x01, 0x01 }, { 0x01, 0x01 } }
};
-static void
-map_buffers(struct vl_mpeg12_decoder *ctx, struct vl_mpeg12_buffer *buffer)
+static bool
+init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
- struct pipe_sampler_view **sampler_views;
- struct pipe_resource *tex;
+ struct pipe_resource *res, res_tmpl;
+ struct pipe_sampler_view sv_tmpl;
+ struct pipe_surface **destination;
+
unsigned i;
- assert(ctx && buffer);
+ assert(dec && buffer);
- if (ctx->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- sampler_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
+ memset(&res_tmpl, 0, sizeof(res_tmpl));
+ res_tmpl.target = PIPE_TEXTURE_2D;
+ res_tmpl.format = dec->zscan_source_format;
+ res_tmpl.width0 = dec->blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
+ res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
+ res_tmpl.depth0 = 1;
+ res_tmpl.array_size = 1;
+ res_tmpl.usage = PIPE_USAGE_STREAM;
+ res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
+
+ res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl);
+ if (!res)
+ goto error_source;
+
+
+ memset(&sv_tmpl, 0, sizeof(sv_tmpl));
+ u_sampler_view_default_template(&sv_tmpl, res, res->format);
+ sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
+ buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, res, &sv_tmpl);
+ pipe_resource_reference(&res, NULL);
+ if (!buffer->zscan_source)
+ goto error_sampler;
+
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+ destination = dec->idct_source->get_surfaces(dec->idct_source);
else
- sampler_views = buffer->mc_source->get_sampler_views(buffer->mc_source);
- assert(sampler_views);
-
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- tex = sampler_views[i]->texture;
-
- struct pipe_box rect =
- {
- 0, 0, 0,
- tex->width0,
- tex->height0,
- 1
- };
-
- buffer->tex_transfer[i] = ctx->pipe->get_transfer
- (
- ctx->pipe, tex,
- 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &rect
- );
-
- buffer->texels[i] = ctx->pipe->transfer_map(ctx->pipe, buffer->tex_transfer[i]);
- }
+ destination = dec->mc_source->get_surfaces(dec->mc_source);
+
+ if (!destination)
+ goto error_surface;
+
+ for (i = 0; i < VL_NUM_COMPONENTS; ++i)
+ if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
+ &buffer->zscan[i], buffer->zscan_source, destination[i]))
+ goto error_plane;
+
+ return true;
+
+error_plane:
+ for (; i > 0; --i)
+ vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);
+
+error_surface:
+error_sampler:
+ pipe_sampler_view_reference(&buffer->zscan_source, NULL);
+
+error_source:
+ return false;
}
+/* Free the per-component zscan buffers and drop the shared zscan source view. */
static void
-upload_block(struct vl_mpeg12_buffer *buffer, unsigned plane, unsigned x, unsigned y, short *block)
+cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
{
-   unsigned tex_pitch;
-   short *texels;
-
   unsigned i;
   assert(buffer);
-   assert(block);
-   tex_pitch = buffer->tex_transfer[plane]->stride / sizeof(short);
-   texels = buffer->texels[plane] + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
+      vl_zscan_cleanup_buffer(&buffer->zscan[i]);
-   for (i = 0; i < BLOCK_HEIGHT; ++i)
-      memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
+   pipe_sampler_view_reference(&buffer->zscan_source, NULL);
}
-static void
-upload_buffer(struct vl_mpeg12_decoder *ctx,
- struct vl_mpeg12_buffer *buffer,
- struct pipe_mpeg12_macroblock *mb)
+static bool
+init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
- short *blocks;
- unsigned tb, x, y;
+ struct pipe_sampler_view **idct_source_sv, **mc_source_sv;
- assert(ctx);
- assert(buffer);
- assert(mb);
+ unsigned i;
- blocks = mb->blocks;
+ assert(dec && buffer);
- for (y = 0; y < 2; ++y) {
- for (x = 0; x < 2; ++x, ++tb) {
- if (mb->cbp & (*ctx->empty_block_mask)[0][y][x]) {
- upload_block(buffer, 0, mb->mbx * 2 + x, mb->mby * 2 + y, blocks);
- blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
- }
- }
- }
+ idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
+ if (!idct_source_sv)
+ goto error_source_sv;
- /* TODO: Implement 422, 444 */
- assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+ mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
+ if (!mc_source_sv)
+ goto error_mc_source_sv;
- for (tb = 1; tb < 3; ++tb) {
- if (mb->cbp & (*ctx->empty_block_mask)[tb][0][0]) {
- upload_block(buffer, tb, mb->mbx, mb->mby, blocks);
- blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
- }
- }
+ for (i = 0; i < 3; ++i)
+ if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
+ &buffer->idct[i], idct_source_sv[i],
+ mc_source_sv[i]))
+ goto error_plane;
+
+ return true;
+
+error_plane:
+ for (; i > 0; --i)
+ vl_idct_cleanup_buffer(&buffer->idct[i - 1]);
+
+error_mc_source_sv:
+error_source_sv:
+ return false;
}
+/* Release the IDCT intermediate buffers of all three color components. */
static void
-unmap_buffers(struct vl_mpeg12_decoder *ctx, struct vl_mpeg12_buffer *buffer)
+cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;
+
+   assert(buf);
-   assert(ctx && buffer);
-
-   for (i = 0; i < VL_MAX_PLANES; ++i) {
-      ctx->pipe->transfer_unmap(ctx->pipe, buffer->tex_transfer[i]);
-      ctx->pipe->transfer_destroy(ctx->pipe, buffer->tex_transfer[i]);
-   }
+   for (i = 0; i < 3; ++i)
+      /* was &buf->idct[0]: freed the Y buffer three times and leaked Cb/Cr */
+      vl_idct_cleanup_buffer(&buf->idct[i]);
}
-static void
-vl_mpeg12_buffer_destroy(struct pipe_video_decode_buffer *buffer)
+static bool
+init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)buf->base.decoder;
- assert(buf && dec);
+ assert(dec && buf);
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
- buf->idct_source->destroy(buf->idct_source);
- buf->idct_intermediate->destroy(buf->idct_intermediate);
- vl_idct_cleanup_buffer(&dec->idct_y, &buf->idct[0]);
- vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[1]);
- vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[2]);
- }
- buf->mc_source->destroy(buf->mc_source);
- vl_vb_cleanup(&buf->vertex_stream);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[0]);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[1]);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[2]);
+ if(!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
+ goto error_mc_y;
- FREE(buf);
+ if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
+ goto error_mc_cb;
+
+ if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
+ goto error_mc_cr;
+
+ return true;
+
+error_mc_cr:
+ vl_mc_cleanup_buffer(&buf->mc[1]);
+
+error_mc_cb:
+ vl_mc_cleanup_buffer(&buf->mc[0]);
+
+error_mc_y:
+ return false;
}
+/* Free the motion compensation buffer of every color component. */
static void
-vl_mpeg12_buffer_map(struct pipe_video_decode_buffer *buffer)
+cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
{
-   struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
-   struct vl_mpeg12_decoder *dec;
+   unsigned i;
+
   assert(buf);
-   dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
-   assert(dec);
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
+      vl_mc_cleanup_buffer(&buf->mc[i]);
+}
+
+static INLINE void
+MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
+{
+ assert(mb);
- vl_vb_map(&buf->vertex_stream, dec->pipe);
- map_buffers(dec, buf);
+ switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
+ case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
+ weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
+ weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+ break;
+
+ case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
+ weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
+ weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
+ break;
+
+ case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
+ weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
+ weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
+ break;
+
+ default:
+ if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) {
+ weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
+ weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+ } else {
+ /* no motion vector, but also not intra mb ->
+ just copy the old frame content */
+ weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
+ weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+ }
+ break;
+ }
}
-static void
-vl_mpeg12_buffer_add_macroblocks(struct pipe_video_decode_buffer *buffer,
- unsigned num_macroblocks,
- struct pipe_macroblock *macroblocks)
+/* Translate an MPEG-1/2 macroblock motion vector into the vl_motionvector
+   layout consumed by the motion compensation stage. 'vector' selects
+   forward (0) or backward (1) prediction, 'weight' the blending factor. */
+static INLINE struct vl_motionvector
+MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
+                   unsigned field_select_mask, unsigned weight)
{
-   struct pipe_mpeg12_macroblock *mb = (struct pipe_mpeg12_macroblock*)macroblocks;
-   struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
-   struct vl_mpeg12_decoder *dec;
-   unsigned i;
+   struct vl_motionvector mv;
-   assert(buf);
+   assert(mb);
-   dec = (struct vl_mpeg12_decoder*)buf->base.decoder;
-   assert(dec);
+   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
+      switch (mb->macroblock_modes.bits.frame_motion_type) {
+      case PIPE_MPEG12_MO_TYPE_FRAME:
+         mv.top.x = mb->PMV[0][vector][0];
+         mv.top.y = mb->PMV[0][vector][1];
+         mv.top.field_select = PIPE_VIDEO_FRAME;
+         mv.top.weight = weight;
+
+         mv.bottom.x = mb->PMV[0][vector][0];
+         mv.bottom.y = mb->PMV[0][vector][1];
+         mv.bottom.weight = weight;
+         mv.bottom.field_select = PIPE_VIDEO_FRAME;
+         break;
+
+      case PIPE_MPEG12_MO_TYPE_FIELD:
+         mv.top.x = mb->PMV[0][vector][0];
+         mv.top.y = mb->PMV[0][vector][1];
+         mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
+            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
+         mv.top.weight = weight;
+
+         mv.bottom.x = mb->PMV[1][vector][0];
+         mv.bottom.y = mb->PMV[1][vector][1];
+         mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
+            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
+         mv.bottom.weight = weight;
+         break;
+
+      default: // TODO: Support DUALPRIME and 16x8
+         /* Until those modes are implemented, return a zero frame vector
+            instead of leaving mv uninitialized (undefined behavior). */
+         mv.top.x = mv.top.y = 0;
+         mv.top.field_select = PIPE_VIDEO_FRAME;
+         mv.top.weight = weight;
+
+         mv.bottom.x = mv.bottom.y = 0;
+         mv.bottom.field_select = PIPE_VIDEO_FRAME;
+         mv.bottom.weight = weight;
+         break;
+      }
+   } else {
+      mv.top.x = mv.top.y = 0;
+      mv.top.field_select = PIPE_VIDEO_FRAME;
+      mv.top.weight = weight;
-   assert(num_macroblocks);
-   assert(macroblocks);
-   assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+      mv.bottom.x = mv.bottom.y = 0;
+      mv.bottom.field_select = PIPE_VIDEO_FRAME;
+      mv.bottom.weight = weight;
+   }
+   return mv;
+}
+
+static INLINE void
+UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
+ struct vl_mpeg12_buffer *buf,
+ const struct pipe_mpeg12_macroblock *mb)
+{
+ unsigned intra;
+ unsigned tb, x, y, num_blocks = 0;
+
+ assert(dec && buf);
+ assert(mb);
+
+ if (!mb->coded_block_pattern)
+ return;
+
+ intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;
+
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x) {
+ if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
- for ( i = 0; i < num_macroblocks; ++i ) {
- vl_vb_add_block(&buf->vertex_stream, &mb[i], dec->empty_block_mask);
- upload_buffer(dec, buf, &mb[i]);
+ struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
+ stream->x = mb->x * 2 + x;
+ stream->y = mb->y * 2 + y;
+ stream->intra = intra;
+ stream->coding = mb->macroblock_modes.bits.dct_type;
+ stream->block_num = buf->block_num++;
+
+ buf->num_ycbcr_blocks[0]++;
+ buf->ycbcr_stream[0]++;
+
+ num_blocks++;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ for (tb = 1; tb < 3; ++tb) {
+ if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
+
+ struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
+ stream->x = mb->x;
+ stream->y = mb->y;
+ stream->intra = intra;
+ stream->coding = 0;
+ stream->block_num = buf->block_num++;
+
+ buf->num_ycbcr_blocks[tb]++;
+ buf->ycbcr_stream[tb]++;
+
+ num_blocks++;
+ }
}
+
+ memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
+ buf->texels += 64 * num_blocks;
}
+/* Destroy one decode buffer: zscan, idct and mc state plus the vertex
+   stream, then free the structure itself.
+   NOTE(review): cleanup_idct_buffer runs even when the decoder entrypoint
+   skips IDCT and init_idct_buffer was never called; this relies on the
+   buffer being zero-initialized (CALLOC_STRUCT) — confirm
+   vl_idct_cleanup_buffer tolerates zeroed state. */
static void
-vl_mpeg12_buffer_unmap(struct pipe_video_decode_buffer *buffer)
+vl_mpeg12_destroy_buffer(void *buffer)
{
-   struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
-   struct vl_mpeg12_decoder *dec;
+   struct vl_mpeg12_buffer *buf = buffer;
+
   assert(buf);
-   dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
-   assert(dec);
+   cleanup_zscan_buffer(buf);
+   cleanup_idct_buffer(buf);
+   cleanup_mc_buffer(buf);
+   vl_vb_cleanup(&buf->vertex_stream);
-   vl_vb_unmap(&buf->vertex_stream, dec->pipe);
-   unmap_buffers(dec, buf);
+   FREE(buf);
}
static void
vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
+ unsigned i;
assert(decoder);
/* Asserted in softpipe_delete_fs_state() for some reason */
- dec->pipe->bind_vs_state(dec->pipe, NULL);
- dec->pipe->bind_fs_state(dec->pipe, NULL);
+ dec->base.context->bind_vs_state(dec->base.context, NULL);
+ dec->base.context->bind_fs_state(dec->base.context, NULL);
- dec->pipe->delete_blend_state(dec->pipe, dec->blend);
- dec->pipe->delete_rasterizer_state(dec->pipe, dec->rast);
- dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+ dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
+ dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
+
+ vl_mc_cleanup(&dec->mc_y);
+ vl_mc_cleanup(&dec->mc_c);
+ dec->mc_source->destroy(dec->mc_source);
- vl_mpeg12_mc_renderer_cleanup(&dec->mc);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
vl_idct_cleanup(&dec->idct_y);
vl_idct_cleanup(&dec->idct_c);
+ dec->idct_source->destroy(dec->idct_source);
}
- dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[0]);
- dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[1]);
- dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[2]);
+
+ vl_zscan_cleanup(&dec->zscan_y);
+ vl_zscan_cleanup(&dec->zscan_c);
+
+ dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
+ dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
+
pipe_resource_reference(&dec->quads.buffer, NULL);
+ pipe_resource_reference(&dec->pos.buffer, NULL);
+
+ pipe_sampler_view_reference(&dec->zscan_linear, NULL);
+ pipe_sampler_view_reference(&dec->zscan_normal, NULL);
+ pipe_sampler_view_reference(&dec->zscan_alternate, NULL);
+
+ for (i = 0; i < 4; ++i)
+ if (dec->dec_buffers[i])
+ vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);
FREE(dec);
}
-static struct pipe_video_decode_buffer *
-vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
+static struct vl_mpeg12_buffer *
+vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
{
- const enum pipe_format idct_source_formats[3] = {
- PIPE_FORMAT_R16G16B16A16_SNORM,
- PIPE_FORMAT_R16G16B16A16_SNORM,
- PIPE_FORMAT_R16G16B16A16_SNORM
- };
-
- const enum pipe_format mc_source_formats[3] = {
- PIPE_FORMAT_R16_SNORM,
- PIPE_FORMAT_R16_SNORM,
- PIPE_FORMAT_R16_SNORM
- };
-
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
struct vl_mpeg12_buffer *buffer;
- struct pipe_sampler_view **idct_source_sv, **idct_intermediate_sv, **mc_source_sv;
- struct pipe_surface **idct_surfaces;
-
assert(dec);
+ buffer = vl_video_buffer_get_associated_data(target, &dec->base);
+ if (buffer)
+ return buffer;
+
+ buffer = dec->dec_buffers[dec->current_buffer];
+ if (buffer)
+ return buffer;
+
buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
if (buffer == NULL)
return NULL;
- buffer->base.decoder = decoder;
- buffer->base.destroy = vl_mpeg12_buffer_destroy;
- buffer->base.map = vl_mpeg12_buffer_map;
- buffer->base.add_macroblocks = vl_mpeg12_buffer_add_macroblocks;
- buffer->base.unmap = vl_mpeg12_buffer_unmap;
-
- buffer->vertex_bufs.individual.quad.stride = dec->quads.stride;
- buffer->vertex_bufs.individual.quad.buffer_offset = dec->quads.buffer_offset;
- pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, dec->quads.buffer);
-
- buffer->vertex_bufs.individual.stream = vl_vb_init(&buffer->vertex_stream, dec->pipe,
- dec->base.width / MACROBLOCK_WIDTH *
- dec->base.height / MACROBLOCK_HEIGHT);
- if (!buffer->vertex_bufs.individual.stream.buffer)
- goto error_vertex_stream;
-
- buffer->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width, dec->base.height, 1,
- dec->base.chroma_format, 3,
- mc_source_formats,
- PIPE_USAGE_STATIC);
-
- if (!buffer->mc_source)
- goto error_mc_source;
+ if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
+ dec->base.width / VL_MACROBLOCK_WIDTH,
+ dec->base.height / VL_MACROBLOCK_HEIGHT))
+ goto error_vertex_buffer;
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
- buffer->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width / 4, dec->base.height, 1,
- dec->base.chroma_format, 3,
- idct_source_formats,
- PIPE_USAGE_STREAM);
- if (!buffer->idct_source)
- goto error_idct_source;
-
- buffer->idct_intermediate = vl_video_buffer_init(dec->base.context, dec->pipe,
- dec->base.width / 4, dec->base.height / 4, 4,
- dec->base.chroma_format, 3,
- idct_source_formats,
- PIPE_USAGE_STATIC);
-
- if (!buffer->idct_intermediate)
- goto error_idct_intermediate;
-
- idct_source_sv = buffer->idct_source->get_sampler_views(buffer->idct_source);
- if (!idct_source_sv)
- goto error_idct_source_sv;
-
- idct_intermediate_sv = buffer->idct_intermediate->get_sampler_views(buffer->idct_intermediate);
- if (!idct_intermediate_sv)
- goto error_idct_intermediate_sv;
-
- idct_surfaces = buffer->mc_source->get_surfaces(buffer->mc_source);
- if (!idct_surfaces)
- goto error_idct_surfaces;
-
- if (!vl_idct_init_buffer(&dec->idct_y, &buffer->idct[0],
- idct_source_sv[0],
- idct_intermediate_sv[0],
- idct_surfaces[0]))
- goto error_idct_y;
-
- if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[1],
- idct_source_sv[1],
- idct_intermediate_sv[1],
- idct_surfaces[1]))
- goto error_idct_cb;
-
- if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[2],
- idct_source_sv[2],
- idct_intermediate_sv[2],
- idct_surfaces[2]))
- goto error_idct_cr;
- }
+ if (!init_mc_buffer(dec, buffer))
+ goto error_mc;
- mc_source_sv = buffer->mc_source->get_sampler_views(buffer->mc_source);
- if (!mc_source_sv)
- goto error_mc_source_sv;
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+ if (!init_idct_buffer(dec, buffer))
+ goto error_idct;
- if(!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[0], mc_source_sv[0]))
- goto error_mc_y;
+ if (!init_zscan_buffer(dec, buffer))
+ goto error_zscan;
- if(!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[1], mc_source_sv[1]))
- goto error_mc_cb;
+ if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
+ vl_mpg12_bs_init(&buffer->bs, &dec->base);
- if(!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[2], mc_source_sv[2]))
- goto error_mc_cr;
+ if (dec->expect_chunked_decode)
+ vl_video_buffer_set_associated_data(target, &dec->base,
+ buffer, vl_mpeg12_destroy_buffer);
+ else
+ dec->dec_buffers[dec->current_buffer] = buffer;
- return &buffer->base;
+ return buffer;
-error_mc_cr:
- vl_mpeg12_mc_cleanup_buffer(&buffer->mc[1]);
+error_zscan:
+ cleanup_idct_buffer(buffer);
-error_mc_cb:
- vl_mpeg12_mc_cleanup_buffer(&buffer->mc[0]);
+error_idct:
+ cleanup_mc_buffer(buffer);
-error_mc_y:
-error_mc_source_sv:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[2]);
+error_mc:
+ vl_vb_cleanup(&buffer->vertex_stream);
-error_idct_cr:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[1]);
+error_vertex_buffer:
+ FREE(buffer);
+ return NULL;
+}
-error_idct_cb:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_cleanup_buffer(&dec->idct_y, &buffer->idct[0]);
+static void
+vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture)
+{
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+ struct vl_mpeg12_buffer *buf;
-error_idct_y:
-error_idct_surfaces:
-error_idct_intermediate_sv:
-error_idct_source_sv:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- buffer->idct_intermediate->destroy(buffer->idct_intermediate);
+ struct pipe_resource *tex;
+ struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
-error_idct_intermediate:
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- buffer->idct_source->destroy(buffer->idct_source);
+ uint8_t intra_matrix[64];
+ uint8_t non_intra_matrix[64];
-error_idct_source:
- buffer->mc_source->destroy(buffer->mc_source);
+ unsigned i;
-error_mc_source:
- vl_vb_cleanup(&buffer->vertex_stream);
+ assert(dec && target && picture);
-error_vertex_stream:
- FREE(buffer);
- return NULL;
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
+ assert(buf);
+
+ if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+ memcpy(intra_matrix, desc->intra_matrix, sizeof(intra_matrix));
+ memcpy(non_intra_matrix, desc->non_intra_matrix, sizeof(non_intra_matrix));
+ intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
+ } else {
+ memset(intra_matrix, 0x10, sizeof(intra_matrix));
+ memset(non_intra_matrix, 0x10, sizeof(non_intra_matrix));
+ }
+
+ for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+ struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
+ vl_zscan_upload_quant(zscan, &buf->zscan[i], intra_matrix, true);
+ vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
+ }
+
+ vl_vb_map(&buf->vertex_stream, dec->base.context);
+
+ tex = buf->zscan_source->texture;
+ rect.width = tex->width0;
+ rect.height = tex->height0;
+
+ buf->texels =
+ dec->base.context->transfer_map(dec->base.context, tex, 0,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_DISCARD_RANGE,
+ &rect, &buf->tex_transfer);
+
+ buf->block_num = 0;
+
+ for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+ buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
+ buf->num_ycbcr_blocks[i] = 0;
+ }
+
+ for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
+ buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
+
+ if (dec->base.entrypoint >= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+ for (i = 0; i < VL_NUM_COMPONENTS; ++i)
+ vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
+ }
}
static void
-vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
- struct pipe_video_buffer *refs[2],
- struct pipe_video_buffer *dst,
- struct pipe_fence_handle **fence)
+vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
+ const struct pipe_macroblock *macroblocks,
+ unsigned num_macroblocks)
{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer;
- struct vl_mpeg12_decoder *dec;
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+ struct vl_mpeg12_buffer *buf;
- struct pipe_sampler_view **sv_past;
- struct pipe_sampler_view **sv_future;
- struct pipe_surface **surfaces;
+ unsigned i, j, mv_weights[2];
- struct pipe_sampler_view *sv_refs[2];
- unsigned ne_start, ne_num, e_start, e_num;
- unsigned i;
+ assert(dec && target && picture);
+ assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
assert(buf);
- dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
- assert(dec);
+ for (; num_macroblocks > 0; --num_macroblocks) {
+ unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;
+
+ if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
+ UploadYcbcrBlocks(dec, buf, mb);
- sv_past = refs[0] ? refs[0]->get_sampler_views(refs[0]) : NULL;
- sv_future = refs[1] ? refs[1]->get_sampler_views(refs[1]) : NULL;
+ MacroBlockTypeToPipeWeights(mb, mv_weights);
- surfaces = dst->get_surfaces(dst);
+ for (i = 0; i < 2; ++i) {
+ if (!desc->ref[i]) continue;
- vl_vb_restart(&buf->vertex_stream, &ne_start, &ne_num, &e_start, &e_num);
+ buf->mv_stream[i][mb_addr] = MotionVectorToPipe
+ (
+ mb, i,
+ i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
+ mv_weights[i]
+ );
+ }
- dec->pipe->set_vertex_buffers(dec->pipe, 2, buf->vertex_bufs.all);
- dec->pipe->bind_blend_state(dec->pipe, dec->blend);
+ /* see section 7.6.6 of the spec */
+ if (mb->num_skipped_macroblocks > 0) {
+ struct vl_motionvector skipped_mv[2];
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves[i]);
+ if (desc->ref[0] && !desc->ref[1]) {
+ skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
+ skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
+ } else {
+ skipped_mv[0] = buf->mv_stream[0][mb_addr];
+ skipped_mv[1] = buf->mv_stream[1][mb_addr];
+ }
+ skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
+ skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_flush(i == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[i], ne_num);
+ skipped_mv[0].bottom = skipped_mv[0].top;
+ skipped_mv[1].bottom = skipped_mv[1].top;
- sv_refs[0] = sv_past ? sv_past[i] : NULL;
- sv_refs[1] = sv_future ? sv_future[i] : NULL;
+ ++mb_addr;
+ for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
+ for (j = 0; j < 2; ++j) {
+ if (!desc->ref[j]) continue;
+ buf->mv_stream[j][mb_addr] = skipped_mv[j];
+
+ }
+ }
+ }
- vl_mpeg12_mc_renderer_flush(&dec->mc, &buf->mc[i], surfaces[i], sv_refs,
- ne_start, ne_num, e_start, e_num, fence);
+ ++mb;
}
}
static void
-vl_mpeg12_decoder_clear_buffer(struct pipe_video_decode_buffer *buffer)
+vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
+ unsigned num_buffers,
+ const void * const *buffers,
+ const unsigned *sizes)
{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer;
- unsigned ne_start, ne_num, e_start, e_num;
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+ struct vl_mpeg12_buffer *buf;
+
+ unsigned i;
+
+ assert(dec && target && picture);
+ buf = vl_mpeg12_get_decode_buffer(dec, target);
assert(buf);
- vl_vb_restart(&buf->vertex_stream, &ne_start, &ne_num, &e_start, &e_num);
+ for (i = 0; i < VL_NUM_COMPONENTS; ++i)
+ vl_zscan_set_layout(&buf->zscan[i], desc->alternate_scan ?
+ dec->zscan_alternate : dec->zscan_normal);
+
+ vl_mpg12_bs_decode(&buf->bs, target, desc, num_buffers, buffers, sizes);
+}
+
+/* Finish decoding one frame: unmap the streams, run motion compensation
+   for both reference frames, render/IDCT the Y'CbCr blocks and advance to
+   the next internal decode buffer. */
+static void
+vl_mpeg12_end_frame(struct pipe_video_decoder *decoder,
+                    struct pipe_video_buffer *target,
+                    struct pipe_picture_desc *picture)
+{
+   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+   struct pipe_sampler_view **ref_frames[2];
+   struct pipe_sampler_view **mc_source_sv;
+   struct pipe_surface **target_surfaces;
+   struct pipe_vertex_buffer vb[3];
+   struct vl_mpeg12_buffer *buf;
+
+   const unsigned *plane_order;
+   unsigned i, j, component;
+   unsigned nr_components;
+
+   assert(dec && target && picture);
+   assert(!target->interlaced);
+
+   buf = vl_mpeg12_get_decode_buffer(dec, target);
+   assert(buf);
+
+   vl_vb_unmap(&buf->vertex_stream, dec->base.context);
+
+   dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
+
+   vb[0] = dec->quads;
+   vb[1] = dec->pos;
+
+   target_surfaces = target->get_surfaces(target);
+
+   for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
+      if (desc->ref[i])
+         ref_frames[i] = desc->ref[i]->get_sampler_view_planes(desc->ref[i]);
+      else
+         ref_frames[i] = NULL;
+   }
+
+   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+      if (!target_surfaces[i]) continue;
+
+      vl_mc_set_surface(&buf->mc[i], target_surfaces[i]);
+
+      for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
+         if (!ref_frames[j] || !ref_frames[j][i]) continue;
+
+         vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
+         dec->base.context->set_vertex_buffers(dec->base.context, 0, 3, vb);
+
+         vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
+      }
+   }
+
+   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+      if (!buf->num_ycbcr_blocks[i]) continue;
+
+      vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
+      dec->base.context->set_vertex_buffers(dec->base.context, 0, 2, vb);
+
+      vl_zscan_render(i ? &dec->zscan_c : &dec->zscan_y, &buf->zscan[i], buf->num_ycbcr_blocks[i]);
+
+      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+         vl_idct_flush(i ? &dec->idct_c : &dec->idct_y, &buf->idct[i], buf->num_ycbcr_blocks[i]);
+   }
+
+   plane_order = vl_video_buffer_plane_order(target->buffer_format);
+   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
+   for (i = 0, component = 0; component < VL_NUM_COMPONENTS; ++i) {
+      if (!target_surfaces[i]) continue;
+
+      nr_components = util_format_get_nr_components(target_surfaces[i]->texture->format);
+      for (j = 0; j < nr_components; ++j, ++component) {
+         unsigned plane = plane_order[component];
+         if (!buf->num_ycbcr_blocks[plane]) continue;
+
+         vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
+         dec->base.context->set_vertex_buffers(dec->base.context, 0, 2, vb);
+
+         if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+            vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
+         else {
+            dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[plane]);
+            dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
+         }
+         vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[plane]);
+      }
+   }
+   ++dec->current_buffer;
+   dec->current_buffer %= 4;
+}
+
+static void
+vl_mpeg12_flush(struct pipe_video_decoder *decoder)
+{
+   assert(decoder);
+
+   /* No-op: for the shader-based decoder it is much faster to flush
+      everything at once in end_frame. */
+}
static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
- struct pipe_rasterizer_state rast;
- struct pipe_blend_state blend;
 struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_sampler_state sampler;
 unsigned i;
 assert(dec);
- memset(&rast, 0, sizeof rast);
- rast.flatshade = 1;
- rast.flatshade_first = 0;
- rast.light_twoside = 0;
- rast.front_ccw = 1;
- rast.cull_face = PIPE_FACE_NONE;
- rast.fill_back = PIPE_POLYGON_MODE_FILL;
- rast.fill_front = PIPE_POLYGON_MODE_FILL;
- rast.offset_point = 0;
- rast.offset_line = 0;
- rast.scissor = 0;
- rast.poly_smooth = 0;
- rast.poly_stipple_enable = 0;
- rast.sprite_coord_enable = 0;
- rast.point_size_per_vertex = 0;
- rast.multisample = 0;
- rast.line_smooth = 0;
- rast.line_stipple_enable = 0;
- rast.line_stipple_factor = 0;
- rast.line_stipple_pattern = 0;
- rast.line_last_pixel = 0;
- rast.line_width = 1;
- rast.point_smooth = 0;
- rast.point_quad_rasterization = 0;
- rast.point_size_per_vertex = 1;
- rast.offset_units = 1;
- rast.offset_scale = 1;
- rast.gl_rasterization_rules = 1;
-
- dec->rast = dec->pipe->create_rasterizer_state(dec->pipe, &rast);
- dec->pipe->bind_rasterizer_state(dec->pipe, dec->rast);
-
- memset(&blend, 0, sizeof blend);
-
- blend.independent_blend_enable = 0;
- blend.rt[0].blend_enable = 0;
- blend.rt[0].rgb_func = PIPE_BLEND_ADD;
- blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].alpha_func = PIPE_BLEND_ADD;
- blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.logicop_enable = 0;
- blend.logicop_func = PIPE_LOGICOP_CLEAR;
- /* Needed to allow color writes to FB, even if blending disabled */
- blend.rt[0].colormask = PIPE_MASK_RGBA;
- blend.dither = 0;
- dec->blend = dec->pipe->create_blend_state(dec->pipe, &blend);
-
 memset(&dsa, 0, sizeof dsa);
 dsa.depth.enabled = 0;
 dsa.depth.writemask = 0;
 dsa.alpha.enabled = 0;
 dsa.alpha.func = PIPE_FUNC_ALWAYS;
 dsa.alpha.ref_value = 0;
- dec->dsa = dec->pipe->create_depth_stencil_alpha_state(dec->pipe, &dsa);
- dec->pipe->bind_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+ dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
+ dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
+ /* NOTE(review): dec->dsa is not NULL-checked here, unlike sampler_ycbcr below — confirm intended. */
+
+ /* Nearest-filtered clamp sampler used to fetch the Y/Cb/Cr MC source planes (non-IDCT path). */
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
+ if (!dec->sampler_ycbcr)
+ return false;
+
+ return true;
+}
+
+/* Scan configs[] and return the first entry whose formats the screen
+ * supports, or NULL if none does. When an IDCT source format is present the
+ * MC source must also be usable as a renderable 3D texture; otherwise a
+ * renderable 2D texture suffices. */
+static const struct format_config*
+find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
+{
+ struct pipe_screen *screen;
+ unsigned i;
+
+ assert(dec);
+
+ screen = dec->base.context->screen;
+
+ for (i = 0; i < num_configs; ++i) {
+ if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
+ 1, PIPE_BIND_SAMPLER_VIEW))
+ continue;
+
+ if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
+ if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
+ 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
+ continue;
+
+ if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
+ 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
+ continue;
+ } else {
+ if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
+ 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
+ continue;
+ }
+ return &configs[i];
+ }
+
+ return NULL;
+}
+
+/* Create the linear/normal/alternate zscan layouts and initialize the luma
+ * and chroma zscan stages. */
+static bool
+init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
+{
+ unsigned num_channels;
+
+ assert(dec);
+
+ dec->zscan_source_format = format_config->zscan_source_format;
+ dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
+ dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
+ dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
+
+ /* 4 channels when the result feeds the IDCT stage, 1 when it goes straight to MC. */
+ num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
+
+ if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
+ dec->blocks_per_line, dec->num_blocks, num_channels))
+ return false;
+
+ if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
+ dec->blocks_per_line, dec->num_blocks, num_channels))
+ return false;
 return true;
}
+/* Allocate the IDCT source and MC intermediate buffers, upload the IDCT
+ * matrix and initialize the luma and chroma IDCT stages. */
static bool
-init_idct(struct vl_mpeg12_decoder *dec, unsigned buffer_width, unsigned buffer_height)
+init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
- unsigned chroma_width, chroma_height, chroma_blocks_x, chroma_blocks_y;
- struct pipe_sampler_view *idct_matrix;
+ unsigned nr_of_idct_render_targets, max_inst;
+ enum pipe_format formats[3];
+ struct pipe_video_buffer templat;
+
+ struct pipe_sampler_view *matrix = NULL;
+
+ nr_of_idct_render_targets = dec->base.context->screen->get_param
+ (
+ dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
+ );
+
+ max_inst = dec->base.context->screen->get_shader_param
+ (
+ dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
+ );
+
+ // Just assume we need 32 inst per render target, not 100% true, but should work in most cases
+ if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
+ // more than 4 render targets usually doesn't make any sense
+ nr_of_idct_render_targets = 4;
+ else
+ nr_of_idct_render_targets = 1;
+
+ formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
+ memset(&templat, 0, sizeof(templat));
+ templat.width = dec->base.width / 4;
+ templat.height = dec->base.height;
+ templat.chroma_format = dec->base.chroma_format;
+ dec->idct_source = vl_video_buffer_create_ex
+ (
+ dec->base.context, &templat,
+ formats, 1, PIPE_USAGE_STATIC
+ );
+
+ if (!dec->idct_source)
+ goto error_idct_source;
+
+ formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
+ memset(&templat, 0, sizeof(templat));
+ templat.width = dec->base.width / nr_of_idct_render_targets;
+ templat.height = dec->base.height / 4;
+ templat.chroma_format = dec->base.chroma_format;
+ dec->mc_source = vl_video_buffer_create_ex
+ (
+ dec->base.context, &templat,
+ formats, nr_of_idct_render_targets, PIPE_USAGE_STATIC
+ );
+
+ if (!dec->mc_source)
+ goto error_mc_source;
- if (!(idct_matrix = vl_idct_upload_matrix(dec->pipe, sqrt(SCALE_FACTOR_16_TO_9))))
- goto error_idct_matrix;
+ if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
+ goto error_matrix;
- if (!vl_idct_init(&dec->idct_y, dec->pipe, buffer_width, buffer_height,
- 2, 2, idct_matrix))
- goto error_idct_y;
+ if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
+ nr_of_idct_render_targets, matrix, matrix))
+ goto error_y;
- if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
- chroma_width = buffer_width / 2;
- chroma_height = buffer_height / 2;
- chroma_blocks_x = 1;
- chroma_blocks_y = 1;
- } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
- chroma_width = buffer_width;
- chroma_height = buffer_height / 2;
- chroma_blocks_x = 2;
- chroma_blocks_y = 1;
- } else {
- chroma_width = buffer_width;
- chroma_height = buffer_height;
- chroma_blocks_x = 2;
- chroma_blocks_y = 2;
- }
+ if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
+ nr_of_idct_render_targets, matrix, matrix))
+ goto error_c;
- if(!vl_idct_init(&dec->idct_c, dec->pipe, chroma_width, chroma_height,
- chroma_blocks_x, chroma_blocks_y, idct_matrix))
- goto error_idct_c;
+ pipe_sampler_view_reference(&matrix, NULL);
- pipe_sampler_view_reference(&idct_matrix, NULL);
 return true;
-error_idct_c:
+error_c:
 vl_idct_cleanup(&dec->idct_y);
-error_idct_y:
- pipe_sampler_view_reference(&idct_matrix, NULL);
+error_y:
+ pipe_sampler_view_reference(&matrix, NULL);
+
+error_matrix:
+ dec->mc_source->destroy(dec->mc_source);
+
+error_mc_source:
+ dec->idct_source->destroy(dec->idct_source);
-error_idct_matrix:
+error_idct_source:
 return false;
}
+/* Allocate the MC source buffer for the pure-MC entrypoint (no IDCT stage).
+ * NOTE(review): "widthout" in the name is a typo for "without"; renaming it
+ * also requires updating the caller in vl_create_mpeg12_decoder. */
+static bool
+init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
+{
+ enum pipe_format formats[3];
+ struct pipe_video_buffer templat;
+
+ formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
+ memset(&templat, 0, sizeof(templat));
+ templat.width = dec->base.width;
+ templat.height = dec->base.height;
+ templat.chroma_format = dec->base.chroma_format;
+ dec->mc_source = vl_video_buffer_create_ex
+ (
+ dec->base.context, &templat,
+ formats, 1, PIPE_USAGE_STATIC
+ );
+
+ return dec->mc_source != NULL;
+}
+
+/* vl_mc vertex shader callback: when an IDCT stage is in use, emit the IDCT
+ * stage-2 vertex code; otherwise just forward the texture coordinates
+ * through a generic output. */
+static void
+mc_vert_shader_callback(void *priv, struct vl_mc *mc,
+ struct ureg_program *shader,
+ unsigned first_output,
+ struct ureg_dst tex)
+{
+ struct vl_mpeg12_decoder *dec = priv;
+ struct ureg_dst o_vtex;
+
+ assert(priv && mc);
+ assert(shader);
+
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+ struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
+ vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
+ } else {
+ o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
+ ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
+ }
+}
+
+/* vl_mc fragment shader callback: when an IDCT stage is in use, emit the
+ * IDCT stage-2 fragment code; otherwise fetch the value from sampler 0 at
+ * the interpolated texture coordinates. */
+static void
+mc_frag_shader_callback(void *priv, struct vl_mc *mc,
+ struct ureg_program *shader,
+ unsigned first_input,
+ struct ureg_dst dst)
+{
+ struct vl_mpeg12_decoder *dec = priv;
+ struct ureg_src src, sampler;
+
+ assert(priv && mc);
+ assert(shader);
+
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+ struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
+ vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
+ } else {
+ src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
+ sampler = ureg_DECL_sampler(shader, 0);
+ ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
+ }
+}
+
struct pipe_video_decoder *
-vl_create_mpeg12_decoder(struct pipe_video_context *context,
- struct pipe_context *pipe,
+vl_create_mpeg12_decoder(struct pipe_context *context,
 enum pipe_video_profile profile,
 enum pipe_video_entrypoint entrypoint,
 enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height)
+ unsigned width, unsigned height, unsigned max_references,
+ bool expect_chunked_decode)
{
+ const unsigned block_size_pixels = VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
+ const struct format_config *format_config;
 struct vl_mpeg12_decoder *dec;
- unsigned i;
 assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
+ /* NOTE(review): dec is dereferenced below but no allocation is visible in
+ * this hunk — presumably CALLOC_STRUCT in elided context; verify. */
 dec->base.chroma_format = chroma_format;
 dec->base.width = width;
 dec->base.height = height;
+ dec->base.max_references = max_references;
 dec->base.destroy = vl_mpeg12_destroy;
- dec->base.create_buffer = vl_mpeg12_create_buffer;
- dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer;
- dec->base.clear_buffer = vl_mpeg12_decoder_clear_buffer;
-
- dec->pipe = pipe;
+ dec->base.begin_frame = vl_mpeg12_begin_frame;
+ dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
+ dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
+ dec->base.end_frame = vl_mpeg12_end_frame;
+ dec->base.flush = vl_mpeg12_flush;
- dec->quads = vl_vb_upload_quads(dec->pipe, 2, 2);
- for (i = 0; i < VL_MAX_PLANES; ++i)
- dec->ves[i] = vl_vb_get_elems_state(dec->pipe, i);
-
- dec->base.width = align(width, MACROBLOCK_WIDTH);
- dec->base.height = align(height, MACROBLOCK_HEIGHT);
+ dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
+ dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
+ dec->width_in_macroblocks = align(dec->base.width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
+ dec->expect_chunked_decode = expect_chunked_decode;
 /* TODO: Implement 422, 444 */
 assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
- dec->empty_block_mask = &const_empty_block_mask_420;
- if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- if (!init_idct(dec, dec->base.width, dec->base.height))
- goto error_idct;
+ if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ dec->chroma_width = dec->base.width / 2;
+ dec->chroma_height = dec->base.height / 2;
+ dec->num_blocks = dec->num_blocks * 2;
+ } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ dec->chroma_width = dec->base.width;
+ dec->chroma_height = dec->base.height / 2;
+ dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
+ } else {
+ dec->chroma_width = dec->base.width;
+ dec->chroma_height = dec->base.height;
+ dec->num_blocks = dec->num_blocks * 3;
+ }
- if (!vl_mpeg12_mc_renderer_init(&dec->mc, dec->pipe, dec->base.width, dec->base.height,
- entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 1.0f : SCALE_FACTOR_16_TO_9))
- goto error_mc;
+ dec->quads = vl_vb_upload_quads(dec->base.context);
+ dec->pos = vl_vb_upload_pos(
+ dec->base.context,
+ dec->base.width / VL_MACROBLOCK_WIDTH,
+ dec->base.height / VL_MACROBLOCK_HEIGHT
+ );
+
+ dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
+ dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
+
+ /* Pick the first format configuration the screen supports for this entrypoint. */
+ switch (entrypoint) {
+ case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
+ format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
+ break;
+
+ case PIPE_VIDEO_ENTRYPOINT_IDCT:
+ format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
+ break;
+
+ case PIPE_VIDEO_ENTRYPOINT_MC:
+ format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
+ break;
+
+ default:
+ assert(0);
+ FREE(dec);
+ return NULL;
+ }
+
+ if (!format_config) {
+ FREE(dec);
+ return NULL;
+ }
+
+ if (!init_zscan(dec, format_config))
+ goto error_zscan;
+
+ if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+ if (!init_idct(dec, format_config))
+ goto error_sources;
+ } else {
+ if (!init_mc_source_widthout_idct(dec, format_config))
+ goto error_sources;
+ }
+
+ if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
+ VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
+ mc_vert_shader_callback, mc_frag_shader_callback, dec))
+ goto error_mc_y;
+
+ // TODO: presumably the chroma MC should use dec->chroma_width/chroma_height — confirm
+ if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
+ VL_BLOCK_HEIGHT, format_config->mc_scale,
+ mc_vert_shader_callback, mc_frag_shader_callback, dec))
+ goto error_mc_c;
 if (!init_pipe_state(dec))
 goto error_pipe_state;
 return &dec->base;
error_pipe_state:
- vl_mpeg12_mc_renderer_cleanup(&dec->mc);
+ vl_mc_cleanup(&dec->mc_c);
-error_mc:
+error_mc_c:
+ vl_mc_cleanup(&dec->mc_y);
+
+error_mc_y:
 if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
 vl_idct_cleanup(&dec->idct_y);
 vl_idct_cleanup(&dec->idct_c);
+ dec->idct_source->destroy(dec->idct_source);
 }
+ dec->mc_source->destroy(dec->mc_source);
-error_idct:
+error_sources:
+ vl_zscan_cleanup(&dec->zscan_y);
+ vl_zscan_cleanup(&dec->zscan_c);
+
+error_zscan:
 FREE(dec);
 return NULL;
}