#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8
-struct vertex_shader_consts
-{
- struct vertex4f norm;
-};
-
-struct fragment_shader_consts
-{
- struct vertex4f multiplier;
- struct vertex4f div;
-};
-
-struct vert_stream_0
+struct vertex_stream_0
{
struct vertex2f pos;
-
struct {
- float luma_eb;
- float cb_eb;
- float cr_eb;
- } field[2][2];
-
+ float y;
+ float cr;
+ float cb;
+ } eb[2][2];
float interlaced;
};
VS_O_MV3
};
-enum MACROBLOCK_TYPE
-{
- MACROBLOCK_TYPE_INTRA,
- MACROBLOCK_TYPE_FWD_FRAME_PRED,
- MACROBLOCK_TYPE_FWD_FIELD_PRED,
- MACROBLOCK_TYPE_BKWD_FRAME_PRED,
- MACROBLOCK_TYPE_BKWD_FIELD_PRED,
- MACROBLOCK_TYPE_BI_FRAME_PRED,
- MACROBLOCK_TYPE_BI_FIELD_PRED,
-
- NUM_MACROBLOCK_TYPES
-};
-
-/* vertices for a quad covering a macroblock */
-static const struct vertex2f const_quad[4] = {
- {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
+static const unsigned const_mbtype_config[VL_NUM_MACROBLOCK_TYPES][2] = {
+ [VL_MACROBLOCK_TYPE_INTRA] = { 0, 0 },
+ [VL_MACROBLOCK_TYPE_FWD_FRAME_PRED] = { 1, 1 },
+ [VL_MACROBLOCK_TYPE_FWD_FIELD_PRED] = { 1, 2 },
+ [VL_MACROBLOCK_TYPE_BKWD_FRAME_PRED] = { 1, 1 },
+ [VL_MACROBLOCK_TYPE_BKWD_FIELD_PRED] = { 1, 2 },
+ [VL_MACROBLOCK_TYPE_BI_FRAME_PRED] = { 2, 1 },
+ [VL_MACROBLOCK_TYPE_BI_FIELD_PRED] = { 2, 2 }
};
static void *
create_vert_shader(struct vl_mpeg12_mc_renderer *r, unsigned ref_frames, unsigned mv_per_frame)
{
struct ureg_program *shader;
- struct ureg_src norm, mbs;
+ struct ureg_src scale;
struct ureg_src vrect, vpos, eb[2][2], interlaced, vmv[4];
- struct ureg_dst scale, t_vpos, t_vtex;
+ struct ureg_dst t_vpos, t_vtex;
struct ureg_dst o_vpos, o_line, o_vtex[3], o_eb[2][2], o_interlaced, o_vmv[4];
unsigned i, j, count, label;
if (!shader)
return NULL;
- norm = ureg_DECL_constant(shader, 0);
- mbs = ureg_imm2f(shader, MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT);
-
- scale = ureg_DECL_temporary(shader);
t_vpos = ureg_DECL_temporary(shader);
t_vtex = ureg_DECL_temporary(shader);
interlaced = ureg_DECL_vs_input(shader, VS_I_INTERLACED);
o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
- o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
+ o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
o_vtex[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0);
o_vtex[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1);
- o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
+ o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
o_eb[0][0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0_0);
o_eb[0][1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0_1);
o_eb[1][0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1_0);
for (i = 0; i < ref_frames; ++i) {
for (j = 0; j < 2; ++j) {
if(j < mv_per_frame) {
- vmv[count] = ureg_DECL_vs_input(shader, VS_I_MV0 + i * 2 + j);
+ vmv[count] = ureg_DECL_vs_input(shader, VS_I_MV0 + count);
o_vmv[count] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + count);
count++;
}
- /* workaround for r600g */
- else if(ref_frames == 2)
- ureg_DECL_vs_input(shader, VS_I_MV0 + i * 2 + j);
}
}
/*
- * scale = norm * mbs;
+ * scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
*
* t_vpos = (vpos + vrect) * scale
* o_vpos.xy = t_vpos
* o_vtex[0..1].xy = t_vpos
* }
* o_vtex[2].xy = t_vpos
+ * o_eb[0..1][0..1] = eb[0..1][0..1]
+ * o_interlaced = interlaced
*
* if(count > 0) { // Apply motion vectors
- * scale = norm * 0.5;
- * o_vmv[0..count] = t_vpos + vmv[0..4] * scale
+ * scale = 0.5 / (dst.width, dst.height);
+ * o_vmv[0..count] = t_vpos + vmv[0..count] * scale
* }
*
*/
- ureg_MUL(shader, ureg_writemask(scale, TGSI_WRITEMASK_XY), norm, mbs);
+ scale = ureg_imm2f(shader,
+ (float)MACROBLOCK_WIDTH / r->buffer_width,
+ (float)MACROBLOCK_HEIGHT / r->buffer_height);
ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
- ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), ureg_src(scale));
+ ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
ureg_MOV(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_X), vrect);
ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), vrect, ureg_imm1f(shader, 0.5f));
ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), vpos, ureg_src(t_vtex));
- ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), ureg_src(scale));
+ ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), scale);
ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_src(t_vtex), ureg_imm1f(shader, 0.5f));
- ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), ureg_src(scale));
+ ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), scale);
ureg_ELSE(shader, &label);
ureg_MOV(shader, o_interlaced, interlaced);
if(count > 0) {
- ureg_MUL(shader, ureg_writemask(scale, TGSI_WRITEMASK_XY), norm, ureg_imm1f(shader, 0.5f));
+ scale = ureg_imm2f(shader,
+ 0.5f / r->buffer_width,
+ 0.5f / r->buffer_height);
+
for (i = 0; i < count; ++i)
- ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), ureg_src(scale), vmv[i], ureg_src(t_vpos));
+ ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), scale, vmv[i], ureg_src(t_vpos));
}
ureg_release_temporary(shader, t_vtex);
ureg_release_temporary(shader, t_vpos);
- ureg_release_temporary(shader, scale);
ureg_END(shader);
*
* tmp.z = fraction(line.y)
* tmp.z = tmp.z >= 0.5 ? 1 : 0
- * tmp.xy = line > 4 ? 1 : 0
+ * tmp.xy = line >= 4 ? 1 : 0
*/
ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Z), ureg_scalar(line, TGSI_SWIZZLE_Y));
ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Z), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
static struct ureg_dst
fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
{
- struct ureg_src tc[3], eb[2][2], interlaced;
- struct ureg_src sampler[3];
- struct ureg_dst texel, t_tc, t_field, tmp;
- unsigned i, l_interlaced, l_y, l_x;
+ struct ureg_src tc[3], sampler[3], eb[2][2], interlaced;
+ struct ureg_dst texel, t_tc, t_eb_info, tmp;
+ unsigned i, label, l_x, l_y;
texel = ureg_DECL_temporary(shader);
t_tc = ureg_DECL_temporary(shader);
- t_field = ureg_DECL_temporary(shader);
+ t_eb_info = ureg_DECL_temporary(shader);
tmp = ureg_DECL_temporary(shader);
tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0, TGSI_INTERPOLATE_LINEAR);
}
/*
- * texel.y = tex(field ? tc[1] : tc[0], sampler[0])
+ * texel.y = tex(field.y ? tc[1] : tc[0], sampler[0])
* texel.cb = tex(tc[2], sampler[1])
* texel.cr = tex(tc[2], sampler[2])
*/
- ureg_MOV(shader, ureg_writemask(t_field, TGSI_WRITEMASK_XY), ureg_src(field));
- ureg_IF(shader, interlaced, &l_interlaced);
- ureg_MOV(shader, ureg_writemask(t_field, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z));
+
+ ureg_IF(shader, interlaced, &label);
+ ureg_MOV(shader, ureg_writemask(field, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z));
ureg_ENDIF(shader);
- for (i = 0; i < 3; ++i) {
- if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
- ureg_IF(shader, ureg_scalar(ureg_src(t_field), TGSI_SWIZZLE_Y), &l_y);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY), tc[1]);
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[1], tc[0]);
- ureg_IF(shader, ureg_scalar(ureg_src(t_field), TGSI_SWIZZLE_X), &l_x);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(eb[1][1], TGSI_SWIZZLE_X + i));
- ureg_ELSE(shader, &l_x);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(eb[1][0], TGSI_SWIZZLE_X + i));
- ureg_ENDIF(shader);
+ ureg_IF(shader, ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y), &l_y);
- ureg_ELSE(shader, &l_y);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY), tc[0]);
+ ureg_IF(shader, ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X), &l_x);
+ ureg_MOV(shader, t_eb_info, eb[1][1]);
+ ureg_ELSE(shader, &l_x);
+ ureg_MOV(shader, t_eb_info, eb[1][0]);
+ ureg_ENDIF(shader);
- ureg_IF(shader, ureg_scalar(ureg_src(t_field), TGSI_SWIZZLE_X), &l_x);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(eb[0][1], TGSI_SWIZZLE_X + i));
- ureg_ELSE(shader, &l_x);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(eb[0][0], TGSI_SWIZZLE_X + i));
- ureg_ENDIF(shader);
+ ureg_ELSE(shader, &l_y);
- ureg_ENDIF(shader);
+ ureg_IF(shader, ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X), &l_x);
+ ureg_MOV(shader, t_eb_info, eb[0][1]);
+ ureg_ELSE(shader, &l_x);
+ ureg_MOV(shader, t_eb_info, eb[0][0]);
+ ureg_ENDIF(shader);
- } else {
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY), tc[2]);
- ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(eb[0][0], TGSI_SWIZZLE_X + i));
- }
+ ureg_ENDIF(shader);
- /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
- ureg_TEX(shader, tmp, TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
- ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
+ for (i = 0; i < 3; ++i) {
+ ureg_IF(shader, ureg_scalar(ureg_src(t_eb_info), TGSI_SWIZZLE_X + i), &label);
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_imm1f(shader, 0.0f));
+ ureg_ELSE(shader, &label);
+
+ /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
+ if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
+ ureg_TEX(shader, tmp, TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
+ } else {
+ ureg_TEX(shader, tmp, TGSI_TEXTURE_3D, tc[2], sampler[i]);
+ }
+
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
+
+ ureg_ENDIF(shader);
}
- ureg_release_temporary(shader, t_field);
ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, t_eb_info);
ureg_release_temporary(shader, tmp);
return texel;
}
-static void *
-create_intra_frag_shader(struct vl_mpeg12_mc_renderer *r)
+static struct ureg_dst
+fetch_ref(struct ureg_program *shader, struct ureg_dst field, unsigned ref_frames, unsigned mv_per_frame)
{
- struct ureg_program *shader;
- struct ureg_dst field, texel;
- struct ureg_dst fragment;
-
- shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
- if (!shader)
- return NULL;
-
- fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
-
- /*
- * texel = fetch_ycbcr()
- * fragment = texel * scale + 0.5
- */
- field = calc_field(shader);
- texel = fetch_ycbcr(r, shader, field);
- ureg_ADD(shader, fragment, ureg_src(texel), ureg_scalar(ureg_imm1f(shader, 0.5f), TGSI_SWIZZLE_X));
-
- ureg_release_temporary(shader, field);
- ureg_release_temporary(shader, texel);
- ureg_END(shader);
-
- return ureg_create_shader_and_destroy(shader, r->pipe);
-}
+ struct ureg_src tc[ref_frames * mv_per_frame], sampler[ref_frames];
+ struct ureg_dst ref[ref_frames], t_tc, result;
+ unsigned i;
-static void *
-create_frame_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
-{
- struct ureg_program *shader;
- struct ureg_src tc;
- struct ureg_src sampler;
- struct ureg_dst field, texel, ref;
- struct ureg_dst fragment;
+ for (i = 0; i < ref_frames * mv_per_frame; ++i)
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
- shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
- if (!shader)
- return NULL;
+ for (i = 0; i < ref_frames; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i + 3);
+ ref[i] = ureg_DECL_temporary(shader);
+ }
- tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0, TGSI_INTERPOLATE_LINEAR);
- sampler = ureg_DECL_sampler(shader, 3);
+ result = ureg_DECL_temporary(shader);
+
+ if (ref_frames == 1) {
+ if(mv_per_frame == 1)
+ /*
+ * result = tex(tc[0], sampler[0])
+ */
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, tc[0], sampler[0]);
+ else {
+ t_tc = ureg_DECL_temporary(shader);
+ /*
+ * result = tex(field.y ? tc[1] : tc[0], sampler[0])
+ */
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z)),
+ tc[1], tc[0]);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(t_tc), sampler[0]);
+
+ ureg_release_temporary(shader, t_tc);
+ }
- ref = ureg_DECL_temporary(shader);
- fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+ } else if (ref_frames == 2) {
+ if(mv_per_frame == 1) {
+ /*
+ * ref[0..1] = tex(tc[0..1], sampler[0..1])
+ */
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[0], sampler[0]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, tc[1], sampler[1]);
+ } else {
+ t_tc = ureg_DECL_temporary(shader);
+
+ /*
+ * if (field.y)
+ * ref[0..1] = tex(tc[0..1], sampler[0..1])
+ * else
+ * ref[0..1] = tex(tc[2..3], sampler[0..1])
+ */
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z)),
+ tc[1], tc[0]);
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler[0]);
+
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z)),
+ tc[3], tc[2]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler[1]);
+
+ ureg_release_temporary(shader, t_tc);
+ }
- /*
- * texel = fetch_ycbcr()
- * ref = tex(tc, sampler)
- * fragment = texel * scale + ref
- */
- field = calc_field(shader);
- texel = fetch_ycbcr(r, shader, field);
- ureg_TEX(shader, ref, TGSI_TEXTURE_2D, tc, sampler);
- ureg_ADD(shader, fragment, ureg_src(texel), ureg_src(ref));
+ ureg_LRP(shader, result, ureg_scalar(ureg_imm1f(shader, 0.5f), TGSI_SWIZZLE_X), ureg_src(ref[0]), ureg_src(ref[1]));
+ }
- ureg_release_temporary(shader, field);
- ureg_release_temporary(shader, texel);
- ureg_release_temporary(shader, ref);
- ureg_END(shader);
+ for (i = 0; i < ref_frames; ++i)
+ ureg_release_temporary(shader, ref[i]);
- return ureg_create_shader_and_destroy(shader, r->pipe);
+ return result;
}
static void *
-create_field_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+create_frag_shader(struct vl_mpeg12_mc_renderer *r, unsigned ref_frames, unsigned mv_per_frame)
{
struct ureg_program *shader;
- struct ureg_src tc[2];
- struct ureg_src sampler;
- struct ureg_dst texel, ref, field;
+ struct ureg_src result;
+ struct ureg_dst field, texel;
struct ureg_dst fragment;
- unsigned i, label;
shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
if (!shader)
return NULL;
- for (i = 0; i < 2; ++i)
- tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
- sampler = ureg_DECL_sampler(shader, 3);
-
- ref = ureg_DECL_temporary(shader);
fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
- /*
- * texel = fetch_ycbcr()
- * field = calc_field();
- * if(field == 1)
- * ref = tex(tc[1], sampler)
- * else
- * ref = tex(tc[0], sampler)
- * fragment = texel * scale + ref
- */
field = calc_field(shader);
texel = fetch_ycbcr(r, shader, field);
- ureg_IF(shader, ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z), &label);
- ureg_TEX(shader, ref, TGSI_TEXTURE_2D, tc[1], sampler);
- ureg_ELSE(shader, &label);
- ureg_TEX(shader, ref, TGSI_TEXTURE_2D, tc[0], sampler);
- ureg_ENDIF(shader);
+ if (ref_frames == 0)
+ result = ureg_imm1f(shader, 0.5f);
+ else
+ result = ureg_src(fetch_ref(shader, field, ref_frames, mv_per_frame));
- ureg_ADD(shader, fragment, ureg_src(texel), ureg_src(ref));
+ ureg_ADD(shader, fragment, ureg_src(texel), result);
ureg_release_temporary(shader, field);
ureg_release_temporary(shader, texel);
- ureg_release_temporary(shader, ref);
ureg_END(shader);
return ureg_create_shader_and_destroy(shader, r->pipe);
}
-static void *
-create_frame_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+static bool
+init_mbtype_handler(struct vl_mpeg12_mc_renderer *r, enum VL_MACROBLOCK_TYPE type,
+ struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS])
{
- struct ureg_program *shader;
- struct ureg_src tc[2];
- struct ureg_src sampler[2];
- struct ureg_dst field, texel, ref[2];
- struct ureg_dst fragment;
+ unsigned ref_frames, mv_per_frame;
+ struct vl_mc_mbtype_handler *handler;
unsigned i;
- shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
- if (!shader)
- return NULL;
+ assert(r);
- for (i = 0; i < 2; ++i) {
- tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
- sampler[i] = ureg_DECL_sampler(shader, i + 3);
- }
+ ref_frames = const_mbtype_config[type][0];
+ mv_per_frame = const_mbtype_config[type][1];
- ref[0] = ureg_DECL_temporary(shader);
- ref[1] = ureg_DECL_temporary(shader);
- fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+ handler = &r->mbtype_handlers[type];
- /*
- * texel = fetch_ycbcr()
- * ref[0..1 = tex(tc[3..4], sampler[3..4])
- * ref[0] = lerp(ref[0], ref[1], 0.5)
- * fragment = texel * scale + ref[0]
- */
- field = calc_field(shader);
- texel = fetch_ycbcr(r, shader, field);
- ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[0], sampler[0]);
- ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, tc[1], sampler[1]);
- ureg_LRP(shader, ref[0], ureg_scalar(ureg_imm1f(shader, 0.5f), TGSI_SWIZZLE_X), ureg_src(ref[0]), ureg_src(ref[1]));
+ handler->vs = create_vert_shader(r, ref_frames, mv_per_frame);
+ handler->fs = create_frag_shader(r, ref_frames, mv_per_frame);
- ureg_ADD(shader, fragment, ureg_src(texel), ureg_src(ref[0]));
+ if (handler->vs == NULL || handler->fs == NULL)
+ return false;
- ureg_release_temporary(shader, field);
- ureg_release_temporary(shader, texel);
- ureg_release_temporary(shader, ref[0]);
- ureg_release_temporary(shader, ref[1]);
- ureg_END(shader);
+ handler->vertex_elems_state = r->pipe->create_vertex_elements_state(
+ r->pipe, 7 + ref_frames * mv_per_frame, vertex_elems);
- return ureg_create_shader_and_destroy(shader, r->pipe);
-}
+ if (handler->vertex_elems_state == NULL)
+ return false;
-static void *
-create_field_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
-{
- struct ureg_program *shader;
- struct ureg_src tc[4];
- struct ureg_src sampler[2];
- struct ureg_dst texel, ref[2], field;
- struct ureg_dst fragment;
- unsigned i, label;
+ if (!vl_vb_init(&handler->pos, r->macroblocks_per_batch, sizeof(struct vertex_stream_0) / sizeof(float)))
+ return false;
- shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
- if (!shader)
- return NULL;
+ for (i = 0; i < ref_frames * mv_per_frame; ++i) {
+ if (!vl_vb_init(&handler->mv[i], r->macroblocks_per_batch, sizeof(struct vertex2f) / sizeof(float)))
+ return false;
+ }
- for (i = 0; i < 4; ++i)
- tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
- for (i = 0; i < 2; ++i)
- sampler[i] = ureg_DECL_sampler(shader, i + 3);
+ return true;
+}
- texel = ureg_DECL_temporary(shader);
- ref[0] = ureg_DECL_temporary(shader);
- ref[1] = ureg_DECL_temporary(shader);
- fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+static void
+cleanup_mbtype_handler(struct vl_mpeg12_mc_renderer *r, enum VL_MACROBLOCK_TYPE type)
+{
+ unsigned ref_frames, mv_per_frame;
+ struct vl_mc_mbtype_handler *handler;
+ unsigned i;
- /*
- * texel = fetch_ycbcr()
- * if(field == 1)
- * ref[0..1] = tex(tc[1|3], sampler[0..1])
- * else
- * ref[0..1] = tex(tc[0|2], sampler[0..1])
- * ref[0] = lerp(ref[0], ref[1], 0.5)
- * fragment = texel * scale + ref[0]
- */
- field = calc_field(shader);
- texel = fetch_ycbcr(r, shader, field);
+ assert(r);
- ureg_IF(shader, ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Z), &label);
- ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[1], sampler[0]);
- ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, tc[3], sampler[1]);
- ureg_ELSE(shader, &label);
- ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[0], sampler[0]);
- ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, tc[2], sampler[1]);
- ureg_ENDIF(shader);
+ ref_frames = const_mbtype_config[type][0];
+ mv_per_frame = const_mbtype_config[type][1];
- ureg_LRP(shader, ref[0], ureg_scalar(ureg_imm1f(shader, 0.5f), TGSI_SWIZZLE_X), ureg_src(ref[0]), ureg_src(ref[1]));
+ handler = &r->mbtype_handlers[type];
- ureg_ADD(shader, fragment, ureg_src(texel), ureg_src(ref[0]));
+ r->pipe->delete_vs_state(r->pipe, handler->vs);
+ r->pipe->delete_fs_state(r->pipe, handler->fs);
+ r->pipe->delete_vertex_elements_state(r->pipe, handler->vertex_elems_state);
- ureg_release_temporary(shader, field);
- ureg_release_temporary(shader, texel);
- ureg_release_temporary(shader, ref[0]);
- ureg_release_temporary(shader, ref[1]);
- ureg_END(shader);
+ vl_vb_cleanup(&handler->pos);
- return ureg_create_shader_and_destroy(shader, r->pipe);
+ for (i = 0; i < ref_frames * mv_per_frame; ++i)
+ vl_vb_cleanup(&handler->mv[i]);
}
+
static bool
init_pipe_state(struct vl_mpeg12_mc_renderer *r)
{
struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
unsigned filters[5];
unsigned i;
assert(r);
- r->viewport.scale[0] = r->pot_buffers ?
- util_next_power_of_two(r->picture_width) : r->picture_width;
- r->viewport.scale[1] = r->pot_buffers ?
- util_next_power_of_two(r->picture_height) : r->picture_height;
+ r->viewport.scale[0] = r->buffer_width;
+ r->viewport.scale[1] = r->buffer_height;
r->viewport.scale[2] = 1;
r->viewport.scale[3] = 1;
r->viewport.translate[0] = 0;
r->viewport.translate[2] = 0;
r->viewport.translate[3] = 0;
- r->fb_state.width = r->pot_buffers ?
- util_next_power_of_two(r->picture_width) : r->picture_width;
- r->fb_state.height = r->pot_buffers ?
- util_next_power_of_two(r->picture_height) : r->picture_height;
+ r->fb_state.width = r->buffer_width;
+ r->fb_state.height = r->buffer_height;
r->fb_state.nr_cbufs = 1;
r->fb_state.zsbuf = NULL;
r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
}
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = true;
+ r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
+
return true;
}
for (i = 0; i < 5; ++i)
r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
-}
-
-static bool
-init_shaders(struct vl_mpeg12_mc_renderer *r)
-{
- assert(r);
-
- assert(r->i_vs = create_vert_shader(r, 0, 0));
- assert(r->i_fs = create_intra_frag_shader(r));
-
- assert(r->p_vs[0] = create_vert_shader(r, 1, 1));
- assert(r->p_vs[1] = create_vert_shader(r, 1, 2));
- assert(r->p_fs[0] = create_frame_pred_frag_shader(r));
- assert(r->p_fs[1] = create_field_pred_frag_shader(r));
-
- assert(r->b_vs[0] = create_vert_shader(r, 2, 1));
- assert(r->b_vs[1] = create_vert_shader(r, 2, 2));
- assert(r->b_fs[0] = create_frame_bi_pred_frag_shader(r));
- assert(r->b_fs[1] = create_field_bi_pred_frag_shader(r));
-
- return true;
-}
-static void
-cleanup_shaders(struct vl_mpeg12_mc_renderer *r)
-{
- assert(r);
-
- r->pipe->delete_vs_state(r->pipe, r->i_vs);
- r->pipe->delete_fs_state(r->pipe, r->i_fs);
- r->pipe->delete_vs_state(r->pipe, r->p_vs[0]);
- r->pipe->delete_vs_state(r->pipe, r->p_vs[1]);
- r->pipe->delete_fs_state(r->pipe, r->p_fs[0]);
- r->pipe->delete_fs_state(r->pipe, r->p_fs[1]);
- r->pipe->delete_vs_state(r->pipe, r->b_vs[0]);
- r->pipe->delete_vs_state(r->pipe, r->b_vs[1]);
- r->pipe->delete_fs_state(r->pipe, r->b_fs[0]);
- r->pipe->delete_fs_state(r->pipe, r->b_fs[1]);
+ r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
}
static bool
init_buffers(struct vl_mpeg12_mc_renderer *r)
{
+ struct pipe_resource *idct_matrix;
struct pipe_resource template;
struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
struct pipe_sampler_view sampler_view;
const unsigned mbw =
- align(r->picture_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ align(r->buffer_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
const unsigned mbh =
- align(r->picture_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
+ align(r->buffer_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
- unsigned i;
+ unsigned i, stride;
assert(r);
r->macroblocks_per_batch =
mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? mbh : 1);
r->num_macroblocks = 0;
- r->macroblock_buf = MALLOC(r->macroblocks_per_batch * sizeof(struct pipe_mpeg12_macroblock));
memset(&template, 0, sizeof(struct pipe_resource));
- template.target = PIPE_TEXTURE_3D;
+ template.target = PIPE_TEXTURE_2D;
/* TODO: Accomodate HW that can't do this and also for cases when this isn't precise enough */
template.format = PIPE_FORMAT_R16_SNORM;
template.last_level = 0;
- template.width0 = r->pot_buffers ?
- util_next_power_of_two(r->picture_width) : r->picture_width;
- template.height0 = r->pot_buffers ?
- util_next_power_of_two(r->picture_height) : r->picture_height;
+ template.width0 = r->buffer_width;
+ template.height0 = r->buffer_height;
template.depth0 = 1;
template.usage = PIPE_USAGE_DYNAMIC;
template.bind = PIPE_BIND_SAMPLER_VIEW;
r->textures.individual.y = r->pipe->screen->resource_create(r->pipe->screen, &template);
+ if (!(idct_matrix = vl_idct_upload_matrix(r->pipe)))
+ return false;
+
+ if (!vl_idct_init(&r->idct_luma, r->pipe, r->buffer_width, r->buffer_height, idct_matrix))
+ return false;
+
+ if (!vl_idct_init_buffer(&r->idct_luma, &r->idct_y, r->textures.individual.y))
+ return false;
+
+ vl_idct_map_buffers(&r->idct_luma, &r->idct_y);
+
if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
- template.width0 = r->pot_buffers ?
- util_next_power_of_two(r->picture_width / 2) :
- r->picture_width / 2;
- template.height0 = r->pot_buffers ?
- util_next_power_of_two(r->picture_height / 2) :
- r->picture_height / 2;
+ template.width0 = r->buffer_width / 2;
+ template.height0 = r->buffer_height / 2;
}
else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422)
- template.height0 = r->pot_buffers ?
- util_next_power_of_two(r->picture_height / 2) :
- r->picture_height / 2;
+ template.height0 = r->buffer_height / 2;
r->textures.individual.cb =
r->pipe->screen->resource_create(r->pipe->screen, &template);
r->textures.individual.cr =
r->pipe->screen->resource_create(r->pipe->screen, &template);
+ if(!vl_idct_init(&r->idct_chroma, r->pipe, template.width0, template.height0, idct_matrix))
+ return false;
+
+ if (!vl_idct_init_buffer(&r->idct_chroma, &r->idct_cb, r->textures.individual.cb))
+ return false;
+
+ vl_idct_map_buffers(&r->idct_chroma, &r->idct_cb);
+
+ if (!vl_idct_init_buffer(&r->idct_chroma, &r->idct_cr, r->textures.individual.cr))
+ return false;
+
+ vl_idct_map_buffers(&r->idct_chroma, &r->idct_cr);
+
for (i = 0; i < 3; ++i) {
u_sampler_view_default_template(&sampler_view,
r->textures.all[i],
r->sampler_views.all[i] = r->pipe->create_sampler_view(r->pipe, r->textures.all[i], &sampler_view);
}
- r->vertex_bufs.individual.rect.stride = sizeof(struct vertex2f);
- r->vertex_bufs.individual.rect.max_index = 4 * r->macroblocks_per_batch - 1;
- r->vertex_bufs.individual.rect.buffer_offset = 0;
- r->vertex_bufs.individual.rect.buffer = pipe_buffer_create
- (
- r->pipe->screen,
- PIPE_BIND_VERTEX_BUFFER,
- sizeof(struct vertex2f) * 4 * r->macroblocks_per_batch
- );
-
- r->vertex_bufs.individual.ycbcr.stride = sizeof(struct vert_stream_0);
- r->vertex_bufs.individual.ycbcr.max_index = 4 * r->macroblocks_per_batch - 1;
- r->vertex_bufs.individual.ycbcr.buffer_offset = 0;
- /* XXX: Create with usage DYNAMIC or STREAM */
- r->vertex_bufs.individual.ycbcr.buffer = pipe_buffer_create
- (
- r->pipe->screen,
- PIPE_BIND_VERTEX_BUFFER,
- sizeof(struct vert_stream_0) * 4 * r->macroblocks_per_batch
- );
-
- for (i = 0; i < 2; ++i) {
- r->vertex_bufs.individual.ref[i].stride = sizeof(struct vertex2f) * 2;
- r->vertex_bufs.individual.ref[i].max_index = 4 * r->macroblocks_per_batch - 1;
- r->vertex_bufs.individual.ref[i].buffer_offset = 0;
- /* XXX: Create with usage DYNAMIC or STREAM */
- r->vertex_bufs.individual.ref[i].buffer = pipe_buffer_create
- (
- r->pipe->screen,
- PIPE_BIND_VERTEX_BUFFER,
- sizeof(struct vertex2f) * 2 * 4 * r->macroblocks_per_batch
- );
- }
-
memset(&vertex_elems, 0, sizeof(vertex_elems));
- /* Rectangle element */
- vertex_elems[VS_I_RECT].src_offset = 0;
- vertex_elems[VS_I_RECT].instance_divisor = 0;
- vertex_elems[VS_I_RECT].vertex_buffer_index = 0;
- vertex_elems[VS_I_RECT].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
+ r->vertex_bufs.individual.quad = vl_vb_upload_quads(r->pipe, r->macroblocks_per_batch);
/* Position element */
- vertex_elems[VS_I_VPOS].src_offset = 0;
- vertex_elems[VS_I_VPOS].instance_divisor = 0;
- vertex_elems[VS_I_VPOS].vertex_buffer_index = 1;
vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R32G32_FLOAT;
- /* y, cr, cb z-coordinate element top left block */
- vertex_elems[VS_I_EB_0_0].src_offset = sizeof(struct vertex2f);
- vertex_elems[VS_I_EB_0_0].instance_divisor = 0;
- vertex_elems[VS_I_EB_0_0].vertex_buffer_index = 1;
+ /* y, cr, cb empty block element top left block */
vertex_elems[VS_I_EB_0_0].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
- /* y, cr, cb z-coordinate element top right block */
- vertex_elems[VS_I_EB_0_1].src_offset = sizeof(struct vertex2f) + sizeof(float) * 3;
- vertex_elems[VS_I_EB_0_1].instance_divisor = 0;
- vertex_elems[VS_I_EB_0_1].vertex_buffer_index = 1;
+ /* y, cr, cb empty block element top right block */
vertex_elems[VS_I_EB_0_1].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
- /* y, cr, cb z-coordinate element bottom left block */
- vertex_elems[VS_I_EB_1_0].src_offset = sizeof(struct vertex2f) + sizeof(float) * 6;
- vertex_elems[VS_I_EB_1_0].instance_divisor = 0;
- vertex_elems[VS_I_EB_1_0].vertex_buffer_index = 1;
+ /* y, cr, cb empty block element bottom left block */
vertex_elems[VS_I_EB_1_0].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
- /* y, cr, cb z-coordinate element bottom right block */
- vertex_elems[VS_I_EB_1_1].src_offset = sizeof(struct vertex2f) + sizeof(float) * 9;
- vertex_elems[VS_I_EB_1_1].instance_divisor = 0;
- vertex_elems[VS_I_EB_1_1].vertex_buffer_index = 1;
+ /* y, cr, cb empty block element bottom right block */
vertex_elems[VS_I_EB_1_1].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
- /* progressive=1.0f interlaced=0.0f */
- vertex_elems[VS_I_INTERLACED].src_offset = sizeof(struct vertex2f) + sizeof(float) * 12;
- vertex_elems[VS_I_INTERLACED].instance_divisor = 0;
- vertex_elems[VS_I_INTERLACED].vertex_buffer_index = 1;
+ /* progressive=0.0f interlaced=1.0f */
vertex_elems[VS_I_INTERLACED].src_format = PIPE_FORMAT_R32_FLOAT;
- /* First ref surface top field texcoord element */
- vertex_elems[VS_I_MV0].src_offset = 0;
- vertex_elems[VS_I_MV0].instance_divisor = 0;
- vertex_elems[VS_I_MV0].vertex_buffer_index = 2;
- vertex_elems[VS_I_MV0].src_format = PIPE_FORMAT_R32G32_FLOAT;
-
- /* First ref surface bottom field texcoord element */
- vertex_elems[VS_I_MV1].src_offset = sizeof(struct vertex2f);
- vertex_elems[VS_I_MV1].instance_divisor = 0;
- vertex_elems[VS_I_MV1].vertex_buffer_index = 2;
- vertex_elems[VS_I_MV1].src_format = PIPE_FORMAT_R32G32_FLOAT;
-
- /* Second ref surface top field texcoord element */
- vertex_elems[VS_I_MV2].src_offset = 0;
- vertex_elems[VS_I_MV2].instance_divisor = 0;
- vertex_elems[VS_I_MV2].vertex_buffer_index = 3;
- vertex_elems[VS_I_MV2].src_format = PIPE_FORMAT_R32G32_FLOAT;
-
- /* Second ref surface bottom field texcoord element */
- vertex_elems[VS_I_MV3].src_offset = sizeof(struct vertex2f);
- vertex_elems[VS_I_MV3].instance_divisor = 0;
- vertex_elems[VS_I_MV3].vertex_buffer_index = 3;
- vertex_elems[VS_I_MV3].src_format = PIPE_FORMAT_R32G32_FLOAT;
-
- r->vertex_elems_state.individual.i = r->pipe->create_vertex_elements_state(r->pipe, 7, vertex_elems);
- r->vertex_elems_state.individual.p = r->pipe->create_vertex_elements_state(r->pipe, 9, vertex_elems);
- r->vertex_elems_state.individual.b = r->pipe->create_vertex_elements_state(r->pipe, 11, vertex_elems);
-
- r->vs_const_buf = pipe_buffer_create
- (
- r->pipe->screen,
- PIPE_BIND_CONSTANT_BUFFER,
- sizeof(struct vertex_shader_consts)
- );
-
- return true;
-}
+ stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 6, 1);
-static bool
-init_const_buffers(struct vl_mpeg12_mc_renderer *r)
-{
- struct pipe_transfer *buf_transfer;
- struct vertex2f *rect;
- unsigned i;
+ r->vertex_bufs.individual.pos = vl_vb_create_buffer(
+ r->pipe, r->macroblocks_per_batch, stride);
- rect = pipe_buffer_map
- (
- r->pipe,
- r->vertex_bufs.individual.rect.buffer,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &buf_transfer
- );
+ for (i = 0; i < 4; ++i) {
+ /* motion vector 0..3 element */
+ vertex_elems[VS_I_MV0 + i].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ stride = vl_vb_element_helper(&vertex_elems[VS_I_MV0 + i], 1, i + 2);
+ r->vertex_bufs.individual.mv[i] = vl_vb_create_buffer(
+ r->pipe, r->macroblocks_per_batch, stride);
+ }
- for ( i = 0; i < r->macroblocks_per_batch; ++i)
- memcpy(rect + i * 4, &const_quad, sizeof(const_quad));
+ for(i = 0; i < VL_NUM_MACROBLOCK_TYPES; ++i)
+ init_mbtype_handler(r, i, vertex_elems);
- pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.rect.buffer, buf_transfer);
-
return true;
}
assert(r);
- pipe_resource_reference(&r->vs_const_buf, NULL);
-
for (i = 0; i < 3; ++i) {
pipe_sampler_view_reference(&r->sampler_views.all[i], NULL);
- r->pipe->delete_vertex_elements_state(r->pipe, r->vertex_elems_state.all[i]);
pipe_resource_reference(&r->vertex_bufs.all[i].buffer, NULL);
pipe_resource_reference(&r->textures.all[i], NULL);
}
- FREE(r->macroblock_buf);
+ for(i = 0; i<VL_NUM_MACROBLOCK_TYPES; ++i)
+ cleanup_mbtype_handler(r, i);
+
+ vl_idct_unmap_buffers(&r->idct_luma, &r->idct_y);
+ vl_idct_unmap_buffers(&r->idct_chroma, &r->idct_cb);
+ vl_idct_unmap_buffers(&r->idct_chroma, &r->idct_cr);
+
+ vl_idct_cleanup_buffer(&r->idct_luma, &r->idct_y);
+ vl_idct_cleanup_buffer(&r->idct_chroma, &r->idct_cb);
+ vl_idct_cleanup_buffer(&r->idct_chroma, &r->idct_cr);
+
+ vl_idct_cleanup(&r->idct_luma);
+ vl_idct_cleanup(&r->idct_chroma);
}
-static enum MACROBLOCK_TYPE
+static enum VL_MACROBLOCK_TYPE
get_macroblock_type(struct pipe_mpeg12_macroblock *mb)
{
assert(mb);
switch (mb->mb_type) {
case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
- return MACROBLOCK_TYPE_INTRA;
+ return VL_MACROBLOCK_TYPE_INTRA;
case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
- MACROBLOCK_TYPE_FWD_FRAME_PRED : MACROBLOCK_TYPE_FWD_FIELD_PRED;
+ VL_MACROBLOCK_TYPE_FWD_FRAME_PRED : VL_MACROBLOCK_TYPE_FWD_FIELD_PRED;
case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
- MACROBLOCK_TYPE_BKWD_FRAME_PRED : MACROBLOCK_TYPE_BKWD_FIELD_PRED;
+ VL_MACROBLOCK_TYPE_BKWD_FRAME_PRED : VL_MACROBLOCK_TYPE_BKWD_FIELD_PRED;
case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
- MACROBLOCK_TYPE_BI_FRAME_PRED : MACROBLOCK_TYPE_BI_FIELD_PRED;
+ VL_MACROBLOCK_TYPE_BI_FRAME_PRED : VL_MACROBLOCK_TYPE_BI_FIELD_PRED;
default:
assert(0);
}
return -1;
}
-void
-gen_macroblock_verts(struct vl_mpeg12_mc_renderer *r,
- struct pipe_mpeg12_macroblock *mb, unsigned pos,
- struct vert_stream_0 *ycbcr_vb, struct vertex2f **ref_vb)
+static void
+upload_vertex_stream(struct vl_mpeg12_mc_renderer *r,
+ unsigned num_macroblocks[VL_NUM_MACROBLOCK_TYPES])
{
- struct vertex2f mo_vec[2];
-
- unsigned i;
-
- assert(r);
- assert(mb);
- assert(ycbcr_vb);
- assert(pos < r->macroblocks_per_batch);
-
- mo_vec[1].x = 0;
- mo_vec[1].y = 0;
-
- switch (mb->mb_type) {
- case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
- {
- struct vertex2f *vb;
-
- assert(ref_vb && ref_vb[1]);
-
- vb = ref_vb[1] + pos * 2 * 4;
-
- mo_vec[0].x = mb->pmv[0][1][0];
- mo_vec[0].y = mb->pmv[0][1][1];
-
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
- for (i = 0; i < 4 * 2; i += 2) {
- vb[i].x = mo_vec[0].x;
- vb[i].y = mo_vec[0].y;
- }
- }
- else {
- mo_vec[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
-
- mo_vec[1].x = mb->pmv[1][1][0];
- mo_vec[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
-
- if(mb->mvfs[0][1]) mo_vec[0].y += 2;
- if(!mb->mvfs[1][1]) mo_vec[1].y -= 2;
-
- for (i = 0; i < 4 * 2; i += 2) {
- vb[i].x = mo_vec[0].x;
- vb[i].y = mo_vec[0].y;
- vb[i + 1].x = mo_vec[1].x;
- vb[i + 1].y = mo_vec[1].y;
- }
- }
-
- /* fall-through */
- }
- case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
- case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
- {
- struct vertex2f *vb;
-
- assert(ref_vb && ref_vb[0]);
-
- vb = ref_vb[0] + pos * 2 * 4;
-
- if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
- mo_vec[0].x = mb->pmv[0][1][0];
- mo_vec[0].y = mb->pmv[0][1][1];
-
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
- mo_vec[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
-
- mo_vec[1].x = mb->pmv[1][1][0];
- mo_vec[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
-
- if(mb->mvfs[0][1]) mo_vec[0].y += 2;
- if(!mb->mvfs[1][1]) mo_vec[1].y -= 2;
- }
- }
- else {
- mo_vec[0].x = mb->pmv[0][0][0];
- mo_vec[0].y = mb->pmv[0][0][1];
-
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
- mo_vec[0].y = mb->pmv[0][0][1] - (mb->pmv[0][0][1] % 4);
-
- mo_vec[1].x = mb->pmv[1][0][0];
- mo_vec[1].y = mb->pmv[1][0][1] - (mb->pmv[1][0][1] % 4);
-
- if(mb->mvfs[0][0]) mo_vec[0].y += 2;
- if(!mb->mvfs[1][0]) mo_vec[1].y -= 2;
- }
- }
-
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
- for (i = 0; i < 4 * 2; i += 2) {
- vb[i].x = mo_vec[0].x;
- vb[i].y = mo_vec[0].y;
- }
- }
- else {
- for (i = 0; i < 4 * 2; i += 2) {
- vb[i].x = mo_vec[0].x;
- vb[i].y = mo_vec[0].y;
- vb[i + 1].x = mo_vec[1].x;
- vb[i + 1].y = mo_vec[1].y;
- }
- }
-
- /* fall-through */
- }
- case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
- {
- struct vert_stream_0 *vb = ycbcr_vb + pos * 4;
- struct vert_stream_0 v;
-
- v.pos.x = mb->mbx;
- v.pos.y = mb->mby;
-
- v.field[0][0].luma_eb = mb->cbp & 32 ? 0.0f : -1.0f;
- v.field[0][1].luma_eb = mb->cbp & 16 ? 0.0f : -1.0f;
- v.field[1][0].luma_eb = mb->cbp & 8 ? 0.0f : -1.0f;
- v.field[1][1].luma_eb = mb->cbp & 4 ? 0.0f : -1.0f;
-
- v.field[0][0].cb_eb = mb->cbp & 2 ? 0.0f : -1.0f;
- v.field[0][1].cb_eb = mb->cbp & 2 ? 0.0f : -1.0f;
- v.field[1][0].cb_eb = mb->cbp & 2 ? 0.0f : -1.0f;
- v.field[1][1].cb_eb = mb->cbp & 2 ? 0.0f : -1.0f;
-
- v.field[0][0].cr_eb = mb->cbp & 1 ? 0.0f : -1.0f;
- v.field[0][1].cr_eb = mb->cbp & 1 ? 0.0f : -1.0f;
- v.field[1][0].cr_eb = mb->cbp & 1 ? 0.0f : -1.0f;
- v.field[1][1].cr_eb = mb->cbp & 1 ? 0.0f : -1.0f;
-
- v.interlaced = mb->dct_type == PIPE_MPEG12_DCT_TYPE_FIELD ? 1.0f : 0.0f;
-
- for ( i = 0; i < 4; ++i )
- memcpy(vb + i, &v, sizeof(v));
+ struct vertex_stream_0 *pos;
+ struct vertex2f *mv[4];
- break;
- }
- default:
- assert(0);
- }
-}
+ struct pipe_transfer *buf_transfer[5];
-static void
-gen_macroblock_stream(struct vl_mpeg12_mc_renderer *r,
- unsigned *num_macroblocks)
-{
- unsigned offset[NUM_MACROBLOCK_TYPES];
- struct vert_stream_0 *ycbcr_vb;
- struct vertex2f *ref_vb[2];
- struct pipe_transfer *buf_transfer[3];
- unsigned i;
+ unsigned i, j;
assert(r);
assert(num_macroblocks);
- for (i = 0; i < r->num_macroblocks; ++i) {
- enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
- ++num_macroblocks[mb_type];
- }
-
- offset[0] = 0;
-
- for (i = 1; i < NUM_MACROBLOCK_TYPES; ++i)
- offset[i] = offset[i - 1] + num_macroblocks[i - 1];
-
- ycbcr_vb = (struct vert_stream_0 *)pipe_buffer_map
+ pos = (struct vertex_stream_0 *)pipe_buffer_map
(
r->pipe,
- r->vertex_bufs.individual.ycbcr.buffer,
+ r->vertex_bufs.individual.pos.buffer,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
&buf_transfer[0]
);
- for (i = 0; i < 2; ++i)
- ref_vb[i] = (struct vertex2f *)pipe_buffer_map
+ for (i = 0; i < 4; ++i)
+ mv[i] = (struct vertex2f *)pipe_buffer_map
(
r->pipe,
- r->vertex_bufs.individual.ref[i].buffer,
+ r->vertex_bufs.individual.mv[i].buffer,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
&buf_transfer[i + 1]
);
- for (i = 0; i < r->num_macroblocks; ++i) {
- enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
+ for (i = 0; i < VL_NUM_MACROBLOCK_TYPES; ++i) {
+ struct vl_mc_mbtype_handler *handler = &r->mbtype_handlers[i];
+ unsigned count = vl_vb_upload(&handler->pos, pos);
+ if (count > 0) {
+ pos += count;
+
+ unsigned ref_frames, mv_per_frame;
- gen_macroblock_verts(r, &r->macroblock_buf[i], offset[mb_type],
- ycbcr_vb, ref_vb);
+ ref_frames = const_mbtype_config[i][0];
+ mv_per_frame = const_mbtype_config[i][1];
- ++offset[mb_type];
+ for (j = 0; j < ref_frames * mv_per_frame; ++j)
+ vl_vb_upload(&handler->mv[j], mv[j]);
+
+ for (j = 0; j < 4; ++j)
+ mv[j] += count;
+ }
+ num_macroblocks[i] = count;
}
- pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.ycbcr.buffer, buf_transfer[0]);
- for (i = 0; i < 2; ++i)
- pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.ref[i].buffer, buf_transfer[i + 1]);
+ pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.pos.buffer, buf_transfer[0]);
+ for (i = 0; i < 4; ++i)
+ pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.mv[i].buffer, buf_transfer[i + 1]);
}
static struct pipe_sampler_view
return sampler_view;
}
-static void
-flush(struct vl_mpeg12_mc_renderer *r)
+static unsigned
+flush_mbtype_handler(struct vl_mpeg12_mc_renderer *r, enum VL_MACROBLOCK_TYPE type,
+ unsigned vb_start, unsigned num_macroblocks)
{
- unsigned num_macroblocks[NUM_MACROBLOCK_TYPES] = { 0 };
- unsigned vb_start = 0;
+ unsigned ref_frames, mv_per_frame;
+ struct vl_mc_mbtype_handler *handler;
assert(r);
- assert(r->num_macroblocks == r->macroblocks_per_batch);
-
- vl_idct_flush(&r->idct_y);
- vl_idct_flush(&r->idct_cr);
- vl_idct_flush(&r->idct_cb);
- gen_macroblock_stream(r, num_macroblocks);
+ ref_frames = const_mbtype_config[type][0];
+ mv_per_frame = const_mbtype_config[type][1];
- r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_VERTEX, 0, r->vs_const_buf);
- r->pipe->set_framebuffer_state(r->pipe, &r->fb_state);
- r->pipe->set_viewport_state(r->pipe, &r->viewport);
+ handler = &r->mbtype_handlers[type];
- if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.i);
- r->pipe->set_fragment_sampler_views(r->pipe, 3, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 3, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->i_vs);
- r->pipe->bind_fs_state(r->pipe, r->i_fs);
+ r->pipe->set_vertex_buffers(r->pipe, 2 + ref_frames * mv_per_frame, r->vertex_bufs.all);
+ r->pipe->bind_vertex_elements_state(r->pipe, handler->vertex_elems_state);
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_INTRA] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_INTRA] * 4;
- }
+ if(ref_frames == 2) {
- if (num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
r->textures.individual.ref[0] = r->past->texture;
+ r->textures.individual.ref[1] = r->future->texture;
r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->past);
- r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->p_vs[0]);
- r->pipe->bind_fs_state(r->pipe, r->p_fs[0]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 4;
- }
+ r->sampler_views.individual.ref[1] = find_or_create_sampler_view(r, r->future);
- if (num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
- r->textures.individual.ref[0] = r->past->texture;
- r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->past);
- r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->p_vs[1]);
- r->pipe->bind_fs_state(r->pipe, r->p_fs[1]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 4;
- }
+ } else if(ref_frames == 1) {
- if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
- r->textures.individual.ref[0] = r->future->texture;
- r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->future);
- r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->p_vs[0]);
- r->pipe->bind_fs_state(r->pipe, r->p_fs[0]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 4;
- }
+ struct pipe_surface *ref;
- if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
- r->textures.individual.ref[0] = r->future->texture;
- r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->future);
- r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->p_vs[1]);
- r->pipe->bind_fs_state(r->pipe, r->p_fs[1]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 4;
- }
+ if(type == VL_MACROBLOCK_TYPE_BKWD_FRAME_PRED ||
+ type == VL_MACROBLOCK_TYPE_BKWD_FIELD_PRED)
+ ref = r->future;
+ else
+ ref = r->past;
- if (num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 4, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.b);
- r->textures.individual.ref[0] = r->past->texture;
- r->textures.individual.ref[1] = r->future->texture;
- r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->past);
- r->sampler_views.individual.ref[1] = find_or_create_sampler_view(r, r->future);
- r->pipe->set_fragment_sampler_views(r->pipe, 5, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 5, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->b_vs[0]);
- r->pipe->bind_fs_state(r->pipe, r->b_fs[0]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 4;
+ r->textures.individual.ref[0] = ref->texture;
+ r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, ref);
}
- if (num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] > 0) {
- r->pipe->set_vertex_buffers(r->pipe, 4, r->vertex_bufs.all);
- r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.b);
- r->textures.individual.ref[0] = r->past->texture;
- r->textures.individual.ref[1] = r->future->texture;
- r->sampler_views.individual.ref[0] = find_or_create_sampler_view(r, r->past);
- r->sampler_views.individual.ref[1] = find_or_create_sampler_view(r, r->future);
- r->pipe->set_fragment_sampler_views(r->pipe, 5, r->sampler_views.all);
- r->pipe->bind_fragment_sampler_states(r->pipe, 5, r->samplers.all);
- r->pipe->bind_vs_state(r->pipe, r->b_vs[1]);
- r->pipe->bind_fs_state(r->pipe, r->b_fs[1]);
-
- util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start,
- num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 4);
- vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 4;
+ r->pipe->set_fragment_sampler_views(r->pipe, 3 + ref_frames, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 3 + ref_frames, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, handler->vs);
+ r->pipe->bind_fs_state(r->pipe, handler->fs);
+
+ util_draw_arrays(r->pipe, PIPE_PRIM_QUADS, vb_start, num_macroblocks);
+ return num_macroblocks;
+}
+
+static void
+get_motion_vectors(struct pipe_mpeg12_macroblock *mb, struct vertex2f mv[4])
+{
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+
+ mv[1].x = mb->pmv[0][1][0];
+ mv[1].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[2].x = mb->pmv[0][1][0];
+ mv[2].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[3].x = mb->pmv[1][1][0];
+ mv[3].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[2].y += 2;
+ if(!mb->mvfs[1][1]) mv[3].y -= 2;
+ }
+
+ /* fall-through */
+ }
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ {
+ if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[1].x = mb->pmv[1][1][0];
+ mv[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[0].y += 2;
+ if(!mb->mvfs[1][1]) mv[1].y -= 2;
+ }
+
+ } else {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1] - (mb->pmv[0][0][1] % 4);
+
+ mv[1].x = mb->pmv[1][0][0];
+ mv[1].y = mb->pmv[1][0][1] - (mb->pmv[1][0][1] % 4);
+
+ if(mb->mvfs[0][0]) mv[0].y += 2;
+ if(!mb->mvfs[1][0]) mv[1].y -= 2;
+ }
+ }
+ }
+ default:
+ break;
}
+}
- r->pipe->flush(r->pipe, PIPE_FLUSH_RENDER_CACHE, r->fence);
+static bool
+empty_block(enum pipe_video_chroma_format chroma_format,
+ unsigned cbp, unsigned component,
+ unsigned x, unsigned y)
+{
+ /* TODO: Implement 422, 444 */
+ assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
- r->num_macroblocks = 0;
+ if(component == 0) /*luma*/
+ return !(cbp & (1 << (5 - (x + y * 2))));
+ else /*cr cb*/
+ return !(cbp & (1 << (2 - component)));
}
static void
-update_render_target(struct vl_mpeg12_mc_renderer *r)
+grab_vectors(struct vl_mpeg12_mc_renderer *r,
+ struct pipe_mpeg12_macroblock *mb)
{
- struct pipe_transfer *buf_transfer;
- struct vertex_shader_consts *vs_consts;
+ enum VL_MACROBLOCK_TYPE type;
+ struct vl_mc_mbtype_handler *handler;
+ struct vertex2f mv[4];
+ struct vertex_stream_0 info;
- vs_consts = pipe_buffer_map
- (
- r->pipe, r->vs_const_buf,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &buf_transfer
- );
+ unsigned ref_frames, mv_per_frame;
+ unsigned i, j, pos;
+
+ assert(r);
+ assert(mb);
+
+ type = get_macroblock_type(mb);
+
+ ref_frames = const_mbtype_config[type][0];
+ mv_per_frame = const_mbtype_config[type][1];
- vs_consts->norm.x = 1.0f / r->surface->width;
- vs_consts->norm.y = 1.0f / r->surface->height;
+ handler = &r->mbtype_handlers[type];
- pipe_buffer_unmap(r->pipe, r->vs_const_buf, buf_transfer);
+ pos = handler->pos.num_verts;
- r->fb_state.cbufs[0] = r->surface;
+ info.pos.x = mb->mbx;
+ info.pos.y = mb->mby;
+ for ( i = 0; i < 2; ++i) {
+ for ( j = 0; j < 2; ++j) {
+ info.eb[i][j].y = empty_block(r->chroma_format, mb->cbp, 0, j, i);
+ info.eb[i][j].cr = empty_block(r->chroma_format, mb->cbp, 1, j, i);
+ info.eb[i][j].cb = empty_block(r->chroma_format, mb->cbp, 2, j, i);
+ }
+ }
+ info.interlaced = mb->dct_type == PIPE_MPEG12_DCT_TYPE_FIELD ? 1.0f : 0.0f;
+ vl_vb_add_block(&handler->pos, (float*)&info);
+
+ get_motion_vectors(mb, mv);
+ for ( j = 0; j < ref_frames * mv_per_frame; ++j )
+ vl_vb_add_block(&handler->mv[j], (float*)&mv[j]);
}
static void
for (y = 0; y < 2; ++y) {
for (x = 0; x < 2; ++x, ++tb) {
- bool eb = !(cbp & (1 << (5 - tb)));
- vl_idct_add_block(&r->idct_y, mbx * 2 + x, mby * 2 + y, eb ? NULL : blocks);
- blocks += eb ? 0 : BLOCK_WIDTH * BLOCK_HEIGHT;
+ if (!empty_block(r->chroma_format, cbp, 0, x, y)) {
+ vl_idct_add_block(&r->idct_y, mbx * 2 + x, mby * 2 + y, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
}
}
/* TODO: Implement 422, 444 */
assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
- for (tb = 0; tb < 2; ++tb) {
- bool eb = !(cbp & (1 << (1 - tb)));
- if(tb == 0)
- vl_idct_add_block(&r->idct_cb, mbx, mby, eb ? NULL : blocks);
- else
- vl_idct_add_block(&r->idct_cr, mbx, mby, eb ? NULL : blocks);
- blocks += eb ? 0 : BLOCK_WIDTH * BLOCK_HEIGHT;
+ for (tb = 1; tb < 3; ++tb) {
+ if (!empty_block(r->chroma_format, cbp, tb, 0, 0)) {
+ if(tb == 1)
+ vl_idct_add_block(&r->idct_cb, mbx, mby, blocks);
+ else
+ vl_idct_add_block(&r->idct_cr, mbx, mby, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
}
}
assert(mb->blocks);
assert(r->num_macroblocks < r->macroblocks_per_batch);
- memcpy(&r->macroblock_buf[r->num_macroblocks], mb,
- sizeof(struct pipe_mpeg12_macroblock));
-
+ grab_vectors(r, mb);
grab_blocks(r, mb->mbx, mb->mby, mb->dct_type, mb->cbp, mb->blocks);
++r->num_macroblocks;
bool
vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
struct pipe_context *pipe,
- unsigned picture_width,
- unsigned picture_height,
+ unsigned buffer_width,
+ unsigned buffer_height,
enum pipe_video_chroma_format chroma_format,
- enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode,
- bool pot_buffers)
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode)
{
assert(renderer);
assert(pipe);
+
/* TODO: Implement other policies */
assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
- /* TODO: Non-pot buffers untested, probably doesn't work without changes to texcoord generation, vert shader, etc */
- assert(pot_buffers);
memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
renderer->pipe = pipe;
- renderer->picture_width = picture_width;
- renderer->picture_height = picture_height;
+ renderer->buffer_width = buffer_width;
+ renderer->buffer_height = buffer_height;
renderer->chroma_format = chroma_format;
renderer->bufmode = bufmode;
- renderer->pot_buffers = pot_buffers;
renderer->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
texview_map_delete);
if (!renderer->texview_map)
return false;
- if (!init_pipe_state(renderer)) {
- util_delete_keymap(renderer->texview_map, renderer->pipe);
- return false;
- }
- if (!init_shaders(renderer)) {
- util_delete_keymap(renderer->texview_map, renderer->pipe);
- cleanup_pipe_state(renderer);
- return false;
- }
- if (!init_buffers(renderer)) {
- util_delete_keymap(renderer->texview_map, renderer->pipe);
- cleanup_shaders(renderer);
- cleanup_pipe_state(renderer);
- return false;
- }
+ if (!init_pipe_state(renderer))
+ goto error_pipe_state;
- if (!init_const_buffers(renderer)) {
- util_delete_keymap(renderer->texview_map, renderer->pipe);
- cleanup_pipe_state(renderer);
- cleanup_shaders(renderer);
- cleanup_buffers(renderer);
- return false;
- }
+ if (!init_buffers(renderer))
+ goto error_buffers;
renderer->surface = NULL;
renderer->past = NULL;
renderer->future = NULL;
renderer->num_macroblocks = 0;
- vl_idct_init(&renderer->idct_y, pipe, renderer->textures.individual.y);
- vl_idct_init(&renderer->idct_cr, pipe, renderer->textures.individual.cr);
- vl_idct_init(&renderer->idct_cb, pipe, renderer->textures.individual.cb);
-
return true;
+
+error_buffers:
+ cleanup_pipe_state(renderer);
+
+error_pipe_state:
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ return false;
}
void
{
assert(renderer);
- vl_idct_cleanup(&renderer->idct_y);
- vl_idct_cleanup(&renderer->idct_cr);
- vl_idct_cleanup(&renderer->idct_cb);
-
util_delete_keymap(renderer->texview_map, renderer->pipe);
cleanup_pipe_state(renderer);
- cleanup_shaders(renderer);
cleanup_buffers(renderer);
pipe_surface_reference(&renderer->surface, NULL);
*mpeg12_macroblocks,
struct pipe_fence_handle **fence)
{
- bool new_surface = false;
-
assert(renderer);
assert(surface);
assert(num_macroblocks);
assert(mpeg12_macroblocks);
- if (renderer->surface) {
- if (surface != renderer->surface) {
- if (renderer->num_macroblocks > 0) {
- flush(renderer);
- }
-
- new_surface = true;
- }
-
- /* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
- assert(surface != renderer->surface || renderer->past == past);
- assert(surface != renderer->surface || renderer->future == future);
- }
- else
- new_surface = true;
-
- if (new_surface) {
+ if (surface != renderer->surface) {
pipe_surface_reference(&renderer->surface, surface);
pipe_surface_reference(&renderer->past, past);
pipe_surface_reference(&renderer->future, future);
renderer->fence = fence;
- update_render_target(renderer);
}
while (num_macroblocks) {
num_macroblocks -= num_to_submit;
if (renderer->num_macroblocks == renderer->macroblocks_per_batch) {
- flush(renderer);
+ vl_mpeg12_mc_renderer_flush(renderer);
+
/* Next time we get this surface it may have new ref frames */
pipe_surface_reference(&renderer->surface, NULL);
pipe_surface_reference(&renderer->past, NULL);
}
}
}
+
+/* Flush the renderer: draw every macroblock queued since the last flush
+   into the current target surface, then reset the batch so new
+   macroblocks can be queued.
+   NOTE(review): idct_y pairs with the idct_luma state while idct_cr and
+   idct_cb share the idct_chroma state -- set up elsewhere in this file,
+   confirm against the init path. */
+void
+vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer)
+{
+ /* Vertex count per macroblock type; zero-initialized here and
+ presumably filled by upload_vertex_stream() below -- TODO confirm. */
+ unsigned num_verts[VL_NUM_MACROBLOCK_TYPES] = { 0 };
+ unsigned vb_start = 0, i;
+
+ assert(renderer);
+ assert(renderer->num_macroblocks <= renderer->macroblocks_per_batch);
+
+ /* Nothing queued -> nothing to draw. */
+ if (renderer->num_macroblocks == 0)
+ return;
+
+ /* Release the CPU mappings of the IDCT coefficient buffers before the
+ GPU reads them, then run the IDCT pass for each plane. */
+ vl_idct_unmap_buffers(&renderer->idct_luma, &renderer->idct_y);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &renderer->idct_cr);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &renderer->idct_cb);
+
+ vl_idct_flush(&renderer->idct_luma, &renderer->idct_y);
+ vl_idct_flush(&renderer->idct_chroma, &renderer->idct_cr);
+ vl_idct_flush(&renderer->idct_chroma, &renderer->idct_cb);
+
+ upload_vertex_stream(renderer, num_verts);
+
+ /* Bind render state and point the framebuffer at the current surface. */
+ renderer->fb_state.cbufs[0] = renderer->surface;
+ renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
+ renderer->pipe->set_framebuffer_state(renderer->pipe, &renderer->fb_state);
+ renderer->pipe->set_viewport_state(renderer->pipe, &renderer->viewport);
+
+ /* One draw per macroblock type that actually has vertices queued;
+ vb_start advances through the shared vertex buffer. */
+ for (i = 0; i < VL_NUM_MACROBLOCK_TYPES; ++i) {
+ if (num_verts[i] > 0)
+ vb_start += flush_mbtype_handler(renderer, i, vb_start, num_verts[i]);
+ }
+
+ /* Submit, signalling the caller-provided fence (set in the render
+ entry point) when rendering completes. */
+ renderer->pipe->flush(renderer->pipe, PIPE_FLUSH_RENDER_CACHE, renderer->fence);
+
+ /* Re-map the IDCT buffers so the next batch can be written by the CPU. */
+ vl_idct_map_buffers(&renderer->idct_luma, &renderer->idct_y);
+ vl_idct_map_buffers(&renderer->idct_chroma, &renderer->idct_cr);
+ vl_idct_map_buffers(&renderer->idct_chroma, &renderer->idct_cb);
+
+ renderer->num_macroblocks = 0;
+}