assert(bitstream);
- if (cur_bit + how_many_bits > sizeof(unsigned) * CHAR_BIT)
- {
- return grab_bits(cur_bit, sizeof(unsigned) * CHAR_BIT - cur_bit,
- bitstream[cur_int]) |
- grab_bits(0, cur_bit + how_many_bits - sizeof(unsigned) * CHAR_BIT,
- bitstream[cur_int + 1]) << (sizeof(unsigned) * CHAR_BIT - cur_bit);
+ if (cur_bit + how_many_bits > sizeof(unsigned) * CHAR_BIT) {
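+       /* The requested bits straddle a word boundary: take the bits left in the
+        * current word and the leading bits of the next one, then splice them
+        * together with a shift. */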
+ unsigned lower = grab_bits(cur_bit, sizeof(unsigned) * CHAR_BIT - cur_bit,
+ bitstream[cur_int]);
+ unsigned upper = grab_bits(0, cur_bit + how_many_bits - sizeof(unsigned) * CHAR_BIT,
+                              bitstream[cur_int + 1]);
+ return lower | upper << (sizeof(unsigned) * CHAR_BIT - cur_bit);
}
else
return grab_bits(cur_bit, how_many_bits, bitstream[cur_int]);
cursor = parser->cursor;
cur_bitstream = parser->cur_bitstream;
- while (1)
- {
+ while (1) {
unsigned bits_left = parser->sizes[cur_bitstream] * CHAR_BIT - cursor;
unsigned bits_to_show = how_many_bits > bits_left ? bits_left : how_many_bits;
bits |= show_bits(cursor, bits_to_show,
parser->bitstreams[cur_bitstream]) << shift;
- if (how_many_bits > bits_to_show)
- {
+ if (how_many_bits > bits_to_show) {
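+          /* The current bitstream buffer ran out before all requested bits were
+           * read; keep accumulating from the start of the next buffer. */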
how_many_bits -= bits_to_show;
cursor = 0;
++cur_bitstream;
parser->cursor += how_many_bits;
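+    /* The advance may run past the end of the current bitstream buffer; walk
+     * forward through the following buffers until the cursor is back in range. */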
- while (parser->cursor > parser->sizes[parser->cur_bitstream] * CHAR_BIT)
- {
+ while (parser->cursor > parser->sizes[parser->cur_bitstream] * CHAR_BIT) {
parser->cursor -= parser->sizes[parser->cur_bitstream++] * CHAR_BIT;
assert(parser->cur_bitstream < parser->num_bitstreams);
}
c = parser->cursor - how_many_bits;
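+    /* Stepping back by how_many_bits may cross one or more buffer boundaries;
+     * walk backwards, adding buffer sizes, until the position is non-negative. */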
- while (c < 0)
- {
+ while (c < 0) {
c += parser->sizes[parser->cur_bitstream--] * CHAR_BIT;
assert(parser->cur_bitstream < parser->num_bitstreams);
}
ti = 3;
/*
- * decl i0 ; Vertex pos
- * decl i1 ; Vertex texcoords
+ * decl i0 ; Vertex pos
+ * decl i1 ; Vertex texcoords
*/
- for (unsigned i = 0; i < 2; i++)
- {
+ for (unsigned i = 0; i < 2; i++) {
decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
/*
- * decl c0 ; Scaling vector to scale vertex pos rect to destination size
- * decl c1 ; Translation vector to move vertex pos rect into position
- * decl c2 ; Scaling vector to scale texcoord rect to source size
- * decl c3 ; Translation vector to move texcoord rect into position
+ * decl c0 ; Scaling vector to scale vertex pos rect to destination size
+ * decl c1 ; Translation vector to move vertex pos rect into position
+ * decl c2 ; Scaling vector to scale texcoord rect to source size
+ * decl c3 ; Translation vector to move texcoord rect into position
*/
decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 3);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
/*
- * decl o0 ; Vertex pos
- * decl o1 ; Vertex texcoords
+ * decl o0 ; Vertex pos
+ * decl o1 ; Vertex texcoords
*/
- for (unsigned i = 0; i < 2; i++)
- {
+ for (unsigned i = 0; i < 2; i++) {
decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
/*
- * mad o0, i0, c0, c1 ; Scale and translate unit output rect to destination size and pos
- * mad o1, i1, c2, c3 ; Scale and translate unit texcoord rect to source size and pos
+ * mad o0, i0, c0, c1 ; Scale and translate unit output rect to destination size and pos
+ * mad o1, i1, c2, c3 ; Scale and translate unit texcoord rect to source size and pos
*/
- for (unsigned i = 0; i < 2; ++i)
- {
+ for (unsigned i = 0; i < 2; ++i) {
inst = vl_inst4(TGSI_OPCODE_MAD, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i, TGSI_FILE_CONSTANT, i * 2, TGSI_FILE_CONSTANT, i * 2 + 1);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
ti = 3;
- /* decl i0 ; Texcoords for s0 */
+ /* decl i0 ; Texcoords for s0 */
decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, 1, 0, 0, TGSI_INTERPOLATE_LINEAR);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
/*
- * decl c0 ; Bias vector for CSC
- * decl c1-c4 ; CSC matrix c1-c4
+ * decl c0 ; Bias vector for CSC
+ * decl c1-c4 ; CSC matrix c1-c4
*/
decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 4);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
- /* decl o0 ; Fragment color */
+ /* decl o0 ; Fragment color */
decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
decl = vl_decl_temps(0, 0);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
- /* decl s0 ; Sampler for tex containing picture to display */
+ /* decl s0 ; Sampler for tex containing picture to display */
decl = vl_decl_samplers(0, 0);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
- /* tex2d t0, i0, s0 ; Read src pixel */
+ /* tex2d t0, i0, s0 ; Read src pixel */
inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_INPUT, 0, TGSI_FILE_SAMPLER, 0);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
- /* sub t0, t0, c0 ; Subtract bias vector from pixel */
+ /* sub t0, t0, c0 ; Subtract bias vector from pixel */
inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
/*
- * dp4 o0.x, t0, c1 ; Multiply pixel by the color conversion matrix
+ * dp4 o0.x, t0, c1 ; Multiply pixel by the color conversion matrix
* dp4 o0.y, t0, c2
* dp4 o0.z, t0, c3
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
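+       /* Each DP4 writes one component of the output color, so the write mask
+        * shifts from .x to .y to .z as i advances. */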
inst = vl_inst3(TGSI_OPCODE_DP4, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, i + 1);
inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
c->vertex_bufs[0].buffer_offset = 0;
c->vertex_bufs[0].buffer = pipe_buffer_create
(
- c->pipe->screen,
- 1,
- PIPE_BUFFER_USAGE_VERTEX,
- sizeof(struct vertex2f) * 4
+ c->pipe->screen,
+ 1,
+ PIPE_BUFFER_USAGE_VERTEX,
+ sizeof(struct vertex2f) * 4
);
memcpy
if (!init_pipe_state(compositor))
return false;
- if (!init_shaders(compositor))
- {
+ if (!init_shaders(compositor)) {
cleanup_pipe_state(compositor);
return false;
}
- if (!init_buffers(compositor))
- {
+ if (!init_buffers(compositor)) {
cleanup_shaders(compositor);
cleanup_pipe_state(compositor);
return false;
* decl i2 ; Chroma Cb texcoords
* decl i3 ; Chroma Cr texcoords
*/
- for (unsigned i = 0; i < 4; i++)
- {
+ for (unsigned i = 0; i < 4; i++) {
decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl o2 ; Chroma Cb texcoords
* decl o3 ; Chroma Cr texcoords
*/
- for (unsigned i = 0; i < 4; i++)
- {
+ for (unsigned i = 0; i < 4; i++) {
decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* mov o2, i2 ; Move input chroma Cb texcoords to output
* mov o3, i3 ; Move input chroma Cr texcoords to output
*/
- for (unsigned i = 0; i < 4; ++i)
- {
+ for (unsigned i = 0; i < 4; ++i) {
inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
* decl i1 ; Chroma Cb texcoords
* decl i2 ; Chroma Cr texcoords
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl s1 ; Sampler for chroma Cb texture
* decl s2 ; Sampler for chroma Cr texture
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
decl = vl_decl_samplers(i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* tex2d t1, i2, s2 ; Read texel from chroma Cr texture
* mov t0.z, t1.x ; Move Cr sample into .z component
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
* decl i4 ; Ref surface top field texcoords
* decl i5 ; Ref surface bottom field texcoords (unused, packed in the same stream)
*/
- for (unsigned i = 0; i < 6; i++)
- {
+ for (unsigned i = 0; i < 6; i++) {
decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl o3 ; Chroma Cr texcoords
* decl o4 ; Ref macroblock texcoords
*/
- for (unsigned i = 0; i < 5; i++)
- {
+ for (unsigned i = 0; i < 5; i++) {
decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* mov o2, i2 ; Move input chroma Cb texcoords to output
* mov o3, i3 ; Move input chroma Cr texcoords to output
*/
- for (unsigned i = 0; i < 4; ++i)
- {
+ for (unsigned i = 0; i < 4; ++i) {
inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
* decl i2 ; Chroma Cr texcoords
* decl i3 ; Ref macroblock texcoords
*/
- for (unsigned i = 0; i < 4; ++i)
- {
+ for (unsigned i = 0; i < 4; ++i) {
decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl s2 ; Sampler for chroma Cr texture
* decl s3 ; Sampler for ref surface texture
*/
- for (unsigned i = 0; i < 4; ++i)
- {
+ for (unsigned i = 0; i < 4; ++i) {
decl = vl_decl_samplers(i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* tex2d t1, i2, s2 ; Read texel from chroma Cr texture
* mov t0.z, t1.x ; Move Cr sample into .z component
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
* decl i6 ; Second ref macroblock top field texcoords
* decl i7 ; Second ref macroblock bottom field texcoords (unused, packed in the same stream)
*/
- for (unsigned i = 0; i < 8; i++)
- {
+ for (unsigned i = 0; i < 8; i++) {
decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl o4 ; First ref macroblock texcoords
* decl o5 ; Second ref macroblock texcoords
*/
- for (unsigned i = 0; i < 6; i++)
- {
+ for (unsigned i = 0; i < 6; i++) {
decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* mov o2, i2 ; Move input chroma Cb texcoords to output
* mov o3, i3 ; Move input chroma Cr texcoords to output
*/
- for (unsigned i = 0; i < 4; ++i)
- {
+ for (unsigned i = 0; i < 4; ++i) {
inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
* add o4, i0, i4 ; Translate vertex pos by motion vec to form first ref macroblock texcoords
* add o5, i0, i6 ; Translate vertex pos by motion vec to form second ref macroblock texcoords
*/
- for (unsigned i = 0; i < 2; ++i)
- {
+ for (unsigned i = 0; i < 2; ++i) {
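+       /* (i + 2) * 2 selects inputs 4 and 6, the top field texcoords of the two
+        * ref macroblocks; the bottom field inputs 5 and 7 are packed in the same
+        * streams but unused here. */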
inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, i + 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, (i + 2) * 2);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
* decl i3 ; First ref macroblock texcoords
* decl i4 ; Second ref macroblock texcoords
*/
- for (unsigned i = 0; i < 5; ++i)
- {
+ for (unsigned i = 0; i < 5; ++i) {
decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* decl s3 ; Sampler for first ref surface texture
* decl s4 ; Sampler for second ref surface texture
*/
- for (unsigned i = 0; i < 5; ++i)
- {
+ for (unsigned i = 0; i < 5; ++i) {
decl = vl_decl_samplers(i, i);
ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
}
* tex2d t1, i2, s2 ; Read texel from chroma Cr texture
* mov t0.z, t1.x ; Move Cr sample into .z component
*/
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
* tex2d t1, i3, s3 ; Read texel from first ref macroblock
* tex2d t2, i4, s4 ; Read texel from second ref macroblock
*/
- for (unsigned i = 0; i < 2; ++i)
- {
+ for (unsigned i = 0; i < 2; ++i) {
inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 1, TGSI_FILE_INPUT, i + 3, TGSI_FILE_SAMPLER, i + 3);
ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
}
{
assert(r);
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
r->tex_transfer[i] = r->pipe->screen->get_tex_transfer
(
r->pipe->screen, r->textures.all[i],
{
assert(r);
- for (unsigned i = 0; i < 3; ++i)
- {
+ for (unsigned i = 0; i < 3; ++i) {
r->pipe->screen->transfer_unmap(r->pipe->screen, r->tex_transfer[i]);
r->pipe->screen->tex_transfer_destroy(r->tex_transfer[i]);
}
filters[0] = PIPE_TEX_FILTER_NEAREST;
/* Chroma filters */
if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 ||
- r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE)
- {
+ r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
filters[1] = PIPE_TEX_FILTER_NEAREST;
filters[2] = PIPE_TEX_FILTER_NEAREST;
}
- else
- {
+ else {
filters[1] = PIPE_TEX_FILTER_LINEAR;
filters[2] = PIPE_TEX_FILTER_LINEAR;
}
filters[3] = PIPE_TEX_FILTER_LINEAR;
filters[4] = PIPE_TEX_FILTER_LINEAR;
- for (unsigned i = 0; i < 5; ++i)
- {
+ for (unsigned i = 0; i < 5; ++i) {
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
r->textures.individual.y = r->pipe->screen->texture_create(r->pipe->screen, &template);
- if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420)
- {
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
template.width[0] = r->pot_buffers ?
util_next_power_of_two(r->picture_width / 2) :
r->picture_width / 2;
sizeof(struct vertex2f) * 4 * 24 * r->macroblocks_per_batch
);
- for (unsigned i = 1; i < 3; ++i)
- {
+ for (unsigned i = 1; i < 3; ++i) {
r->vertex_bufs.all[i].stride = sizeof(struct vertex2f) * 2;
r->vertex_bufs.all[i].max_index = 24 * r->macroblocks_per_batch - 1;
r->vertex_bufs.all[i].buffer_offset = 0;
{
assert(mb);
- switch (mb->mb_type)
- {
+ switch (mb->mb_type) {
case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
return MACROBLOCK_TYPE_INTRA;
case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
assert(ycbcr_vb);
assert(pos < r->macroblocks_per_batch);
- switch (mb->mb_type)
- {
+ switch (mb->mb_type) {
case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
{
struct vertex2f *vb;
mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y;
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME)
- {
- for (unsigned i = 0; i < 24 * 2; i += 2)
- {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ for (unsigned i = 0; i < 24 * 2; i += 2) {
vb[i].x = mo_vec[0].x;
vb[i].y = mo_vec[0].y;
}
}
- else
- {
+ else {
mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y;
- for (unsigned i = 0; i < 24 * 2; i += 2)
- {
+ for (unsigned i = 0; i < 24 * 2; i += 2) {
vb[i].x = mo_vec[0].x;
vb[i].y = mo_vec[0].y;
vb[i + 1].x = mo_vec[1].x;
vb = ref_vb[0] + pos * 2 * 24;
- if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD)
- {
+ if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y;
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD)
- {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y;
}
}
- else
- {
+ else {
mo_vec[0].x = mb->pmv[0][0][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[0].y = mb->pmv[0][0][1] * 0.5f * r->surface_tex_inv_size.y;
- if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD)
- {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
mo_vec[1].x = mb->pmv[1][0][0] * 0.5f * r->surface_tex_inv_size.x;
mo_vec[1].y = mb->pmv[1][0][1] * 0.5f * r->surface_tex_inv_size.y;
}
}
- if (mb->mb_type == PIPE_MPEG12_MOTION_TYPE_FRAME)
- {
- for (unsigned i = 0; i < 24 * 2; i += 2)
- {
+      if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ for (unsigned i = 0; i < 24 * 2; i += 2) {
vb[i].x = mo_vec[0].x;
vb[i].y = mo_vec[0].y;
}
}
- else
- {
- for (unsigned i = 0; i < 24 * 2; i += 2)
- {
+ else {
+ for (unsigned i = 0; i < 24 * 2; i += 2) {
vb[i].x = mo_vec[0].x;
vb[i].y = mo_vec[0].y;
vb[i + 1].x = mo_vec[1].x;
assert(r);
assert(num_macroblocks);
- for (unsigned i = 0; i < r->num_macroblocks; ++i)
- {
+ for (unsigned i = 0; i < r->num_macroblocks; ++i) {
enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
++num_macroblocks[mb_type];
}
PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
);
- for (unsigned i = 0; i < r->num_macroblocks; ++i)
- {
+ for (unsigned i = 0; i < r->num_macroblocks; ++i) {
enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
gen_macroblock_verts(r, &r->macroblock_buf[i], offset[mb_type],
r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_FRAGMENT, 0,
&r->fs_const_buf);
- if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0)
- {
+ if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0) {
r->pipe->set_vertex_buffers(r->pipe, 1, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 4, r->vertex_elems);
r->pipe->set_sampler_textures(r->pipe, 3, r->textures.all);
vb_start += num_macroblocks[MACROBLOCK_TYPE_INTRA] * 24;
}
- if (num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0)
- {
+ if (num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0) {
r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
r->textures.individual.ref[0] = r->past;
vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 24;
}
- if (false /*num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] > 0 */ )
- {
+ if (false /*num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] > 0 */ ) {
r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
r->textures.individual.ref[0] = r->past;
vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 24;
}
- if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0)
- {
+ if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0) {
r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
r->textures.individual.ref[0] = r->future;
vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 24;
}
- if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0 */ )
- {
+ if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0 */ ) {
r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
r->textures.individual.ref[0] = r->future;
vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 24;
}
- if (num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0)
- {
+ if (num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0) {
r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems);
r->textures.individual.ref[0] = r->past;
vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 24;
}
- if (false /*num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] > 0 */ )
- {
+ if (false /*num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] > 0 */ ) {
r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems);
r->textures.individual.ref[0] = r->past;
tex_pitch = r->tex_transfer[0]->stride / r->tex_transfer[0]->block.size;
texels = r->texels[0] + mbpy * tex_pitch + mbpx;
- for (unsigned y = 0; y < 2; ++y)
- {
- for (unsigned x = 0; x < 2; ++x, ++tb)
- {
- if ((cbp >> (5 - tb)) & 1)
- {
- if (dct_type == PIPE_MPEG12_DCT_TYPE_FRAME)
- {
+ for (unsigned y = 0; y < 2; ++y) {
+ for (unsigned x = 0; x < 2; ++x, ++tb) {
+ if ((cbp >> (5 - tb)) & 1) {
+ if (dct_type == PIPE_MPEG12_DCT_TYPE_FRAME) {
grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT,
texels + y * tex_pitch * BLOCK_WIDTH +
x * BLOCK_WIDTH, tex_pitch);
}
- else
- {
+ else {
grab_field_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT,
texels + y * tex_pitch + x * BLOCK_WIDTH,
tex_pitch);
++sb;
}
- else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE)
- {
+ else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) {
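+          /* Block carries no coded data: with XFER_ALL its texels are cleared every
+           * time, with XFER_ONE only until a zero block has been established, whose
+           * position is then recorded (presumably reused for later empty blocks). */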
if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL ||
- ZERO_BLOCK_IS_NIL(r->zero_block[0]))
- {
+ ZERO_BLOCK_IS_NIL(r->zero_block[0])) {
fill_zero_block(texels + y * tex_pitch * BLOCK_WIDTH + x * BLOCK_WIDTH, tex_pitch);
- if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE)
- {
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
r->zero_block[0].x = (mbpx + x * 8) * r->surface_tex_inv_size.x;
r->zero_block[0].y = (mbpy + y * 8) * r->surface_tex_inv_size.y;
}
mbpx /= 2;
mbpy /= 2;
- for (tb = 0; tb < 2; ++tb)
- {
+ for (tb = 0; tb < 2; ++tb) {
tex_pitch = r->tex_transfer[tb + 1]->stride / r->tex_transfer[tb + 1]->block.size;
texels = r->texels[tb + 1] + mbpy * tex_pitch + mbpx;
- if ((cbp >> (1 - tb)) & 1)
- {
+ if ((cbp >> (1 - tb)) & 1) {
grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT, texels, tex_pitch);
++sb;
}
- else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE)
- {
+ else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) {
if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL ||
- ZERO_BLOCK_IS_NIL(r->zero_block[tb + 1]))
- {
+ ZERO_BLOCK_IS_NIL(r->zero_block[tb + 1])) {
fill_zero_block(texels, tex_pitch);
- if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE)
- {
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
r->zero_block[tb + 1].x = (mbpx << 1) * r->surface_tex_inv_size.x;
r->zero_block[tb + 1].y = (mbpy << 1) * r->surface_tex_inv_size.y;
}
if (!init_pipe_state(renderer))
return false;
- if (!init_shaders(renderer))
- {
+ if (!init_shaders(renderer)) {
cleanup_pipe_state(renderer);
return false;
}
- if (!init_buffers(renderer))
- {
+ if (!init_buffers(renderer)) {
cleanup_shaders(renderer);
cleanup_pipe_state(renderer);
return false;
assert(num_macroblocks);
assert(mpeg12_macroblocks);
- if (renderer->surface)
- {
- if (surface != renderer->surface)
- {
- if (renderer->num_macroblocks > 0)
- {
+ if (renderer->surface) {
+ if (surface != renderer->surface) {
+ if (renderer->num_macroblocks > 0) {
xfer_buffers_unmap(renderer);
flush(renderer);
}
else
new_surface = true;
- if (new_surface)
- {
+ if (new_surface) {
renderer->surface = surface;
renderer->past = past;
renderer->future = future;
renderer->surface_tex_inv_size.y = 1.0f / surface->height[0];
}
- while (num_macroblocks)
- {
+ while (num_macroblocks) {
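+    /* Submit the macroblocks in batches; whenever a batch fills up, unmap the
+     * transfer buffers, flush the batch and map the buffers again. */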
unsigned left_in_batch = renderer->macroblocks_per_batch - renderer->num_macroblocks;
unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch);
- for (unsigned i = 0; i < num_to_submit; ++i)
- {
+ for (unsigned i = 0; i < num_to_submit; ++i) {
assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12);
grab_macroblock(renderer, &mpeg12_macroblocks[i]);
}
num_macroblocks -= num_to_submit;
- if (renderer->num_macroblocks == renderer->macroblocks_per_batch)
- {
+ if (renderer->num_macroblocks == renderer->macroblocks_per_batch) {
xfer_buffers_unmap(renderer);
flush(renderer);
xfer_buffers_map(renderer);
assert
(
- interpolation == TGSI_INTERPOLATE_CONSTANT ||
- interpolation == TGSI_INTERPOLATE_LINEAR ||
- interpolation == TGSI_INTERPOLATE_PERSPECTIVE
+ interpolation == TGSI_INTERPOLATE_CONSTANT ||
+ interpolation == TGSI_INTERPOLATE_LINEAR ||
+ interpolation == TGSI_INTERPOLATE_PERSPECTIVE
);
decl.Declaration.File = TGSI_FILE_INPUT;
template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_RENDER_TARGET;
sp_vsfc->tex = screen->texture_create(screen, &template);
- if (!sp_vsfc->tex)
- {
+ if (!sp_vsfc->tex) {
FREE(sp_vsfc);
return NULL;
}
static void
sp_mpeg12_destroy(struct pipe_video_context *vpipe)
{
- struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
+ struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
- assert(vpipe);
+ assert(vpipe);
- /* Asserted in softpipe_delete_fs_state() for some reason */
- ctx->pipe->bind_vs_state(ctx->pipe, NULL);
- ctx->pipe->bind_fs_state(ctx->pipe, NULL);
+ /* Asserted in softpipe_delete_fs_state() for some reason */
+ ctx->pipe->bind_vs_state(ctx->pipe, NULL);
+ ctx->pipe->bind_fs_state(ctx->pipe, NULL);
- ctx->pipe->delete_blend_state(ctx->pipe, ctx->blend);
- ctx->pipe->delete_rasterizer_state(ctx->pipe, ctx->rast);
- ctx->pipe->delete_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
+ ctx->pipe->delete_blend_state(ctx->pipe, ctx->blend);
+ ctx->pipe->delete_rasterizer_state(ctx->pipe, ctx->rast);
+ ctx->pipe->delete_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
- pipe_video_surface_reference(&ctx->decode_target, NULL);
- vl_compositor_cleanup(&ctx->compositor);
- vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
- ctx->pipe->destroy(ctx->pipe);
+ pipe_video_surface_reference(&ctx->decode_target, NULL);
+ vl_compositor_cleanup(&ctx->compositor);
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
- FREE(ctx);
+ FREE(ctx);
}
static void
struct pipe_macroblock *macroblocks,
struct pipe_fence_handle **fence)
{
- struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
- struct pipe_mpeg12_macroblock *mpeg12_macroblocks = (struct pipe_mpeg12_macroblock*)macroblocks;
+ struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks = (struct pipe_mpeg12_macroblock*)macroblocks;
- assert(vpipe);
- assert(num_macroblocks);
- assert(macroblocks);
- assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
- assert(ctx->decode_target);
+ assert(vpipe);
+ assert(num_macroblocks);
+ assert(macroblocks);
+ assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+ assert(ctx->decode_target);
- vl_mpeg12_mc_renderer_render_macroblocks(&ctx->mc_renderer,
- softpipe_video_surface(ctx->decode_target)->tex,
- past ? softpipe_video_surface(past)->tex : NULL,
- future ? softpipe_video_surface(future)->tex : NULL,
- num_macroblocks, mpeg12_macroblocks, fence);
+ vl_mpeg12_mc_renderer_render_macroblocks(&ctx->mc_renderer,
+ softpipe_video_surface(ctx->decode_target)->tex,
+ past ? softpipe_video_surface(past)->tex : NULL,
+ future ? softpipe_video_surface(future)->tex : NULL,
+ num_macroblocks, mpeg12_macroblocks, fence);
}
static void
unsigned value,
struct pipe_surface *surface)
{
- struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
+ struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
- assert(vpipe);
- assert(surface);
+ assert(vpipe);
+ assert(surface);
- ctx->pipe->surface_fill(ctx->pipe, surface, x, y, width, height, value);
+ ctx->pipe->surface_fill(ctx->pipe, surface, x, y, width, height, value);
}
static void
struct pipe_video_rect *layer_dst_areas*/
struct pipe_fence_handle **fence)
{
- struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
+ struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
- assert(vpipe);
- assert(src_surface);
- assert(src_area);
- assert(dst_surface);
- assert(dst_area);
+ assert(vpipe);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
- vl_compositor_render(&ctx->compositor, softpipe_video_surface(src_surface)->tex,
- picture_type, src_area, dst_surface->texture, dst_area, fence);
+ vl_compositor_render(&ctx->compositor, softpipe_video_surface(src_surface)->tex,
+ picture_type, src_area, dst_surface->texture, dst_area, fence);
}
static void
sp_mpeg12_set_decode_target(struct pipe_video_context *vpipe,
struct pipe_video_surface *dt)
{
- struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
+ struct sp_mpeg12_context *ctx = (struct sp_mpeg12_context*)vpipe;
- assert(vpipe);
- assert(dt);
+ assert(vpipe);
+ assert(dt);
- pipe_video_surface_reference(&ctx->decode_target, dt);
+ pipe_video_surface_reference(&ctx->decode_target, dt);
}
static bool
init_pipe_state(struct sp_mpeg12_context *ctx)
{
- struct pipe_rasterizer_state rast;
- struct pipe_blend_state blend;
- struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_rasterizer_state rast;
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state dsa;
- assert(ctx);
+ assert(ctx);
- rast.flatshade = 1;
- rast.flatshade_first = 0;
- rast.light_twoside = 0;
- rast.front_winding = PIPE_WINDING_CCW;
- rast.cull_mode = PIPE_WINDING_CW;
- rast.fill_cw = PIPE_POLYGON_MODE_FILL;
- rast.fill_ccw = PIPE_POLYGON_MODE_FILL;
- rast.offset_cw = 0;
- rast.offset_ccw = 0;
- rast.scissor = 0;
- rast.poly_smooth = 0;
- rast.poly_stipple_enable = 0;
- rast.point_sprite = 0;
- rast.point_size_per_vertex = 0;
- rast.multisample = 0;
- rast.line_smooth = 0;
- rast.line_stipple_enable = 0;
- rast.line_stipple_factor = 0;
- rast.line_stipple_pattern = 0;
- rast.line_last_pixel = 0;
- rast.bypass_vs_clip_and_viewport = 0;
- rast.line_width = 1;
- rast.point_smooth = 0;
- rast.point_size = 1;
- rast.offset_units = 1;
- rast.offset_scale = 1;
- /*rast.sprite_coord_mode[i] = ;*/
- ctx->rast = ctx->pipe->create_rasterizer_state(ctx->pipe, &rast);
- ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rast);
+ rast.flatshade = 1;
+ rast.flatshade_first = 0;
+ rast.light_twoside = 0;
+ rast.front_winding = PIPE_WINDING_CCW;
+ rast.cull_mode = PIPE_WINDING_CW;
+ rast.fill_cw = PIPE_POLYGON_MODE_FILL;
+ rast.fill_ccw = PIPE_POLYGON_MODE_FILL;
+ rast.offset_cw = 0;
+ rast.offset_ccw = 0;
+ rast.scissor = 0;
+ rast.poly_smooth = 0;
+ rast.poly_stipple_enable = 0;
+ rast.point_sprite = 0;
+ rast.point_size_per_vertex = 0;
+ rast.multisample = 0;
+ rast.line_smooth = 0;
+ rast.line_stipple_enable = 0;
+ rast.line_stipple_factor = 0;
+ rast.line_stipple_pattern = 0;
+ rast.line_last_pixel = 0;
+ rast.bypass_vs_clip_and_viewport = 0;
+ rast.line_width = 1;
+ rast.point_smooth = 0;
+ rast.point_size = 1;
+ rast.offset_units = 1;
+ rast.offset_scale = 1;
+ /*rast.sprite_coord_mode[i] = ;*/
+ ctx->rast = ctx->pipe->create_rasterizer_state(ctx->pipe, &rast);
+ ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rast);
- blend.blend_enable = 0;
- blend.rgb_func = PIPE_BLEND_ADD;
- blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.alpha_func = PIPE_BLEND_ADD;
- blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.logicop_enable = 0;
- blend.logicop_func = PIPE_LOGICOP_CLEAR;
- /* Needed to allow color writes to FB, even if blending disabled */
- blend.colormask = PIPE_MASK_RGBA;
- blend.dither = 0;
- ctx->blend = ctx->pipe->create_blend_state(ctx->pipe, &blend);
- ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend);
+ blend.blend_enable = 0;
+ blend.rgb_func = PIPE_BLEND_ADD;
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_func = PIPE_BLEND_ADD;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.logicop_enable = 0;
+ blend.logicop_func = PIPE_LOGICOP_CLEAR;
+ /* Needed to allow color writes to FB, even if blending disabled */
+ blend.colormask = PIPE_MASK_RGBA;
+ blend.dither = 0;
+ ctx->blend = ctx->pipe->create_blend_state(ctx->pipe, &blend);
+ ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend);
- dsa.depth.enabled = 0;
- dsa.depth.writemask = 0;
- dsa.depth.func = PIPE_FUNC_ALWAYS;
- dsa.depth.occlusion_count = 0;
- for (unsigned i = 0; i < 2; ++i)
- {
- dsa.stencil[i].enabled = 0;
- dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
- dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].ref_value = 0;
- dsa.stencil[i].valuemask = 0;
- dsa.stencil[i].writemask = 0;
- }
- dsa.alpha.enabled = 0;
- dsa.alpha.func = PIPE_FUNC_ALWAYS;
- dsa.alpha.ref_value = 0;
- ctx->dsa = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &dsa);
- ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
+ dsa.depth.enabled = 0;
+ dsa.depth.writemask = 0;
+ dsa.depth.func = PIPE_FUNC_ALWAYS;
+ dsa.depth.occlusion_count = 0;
+ for (unsigned i = 0; i < 2; ++i) {
+ dsa.stencil[i].enabled = 0;
+ dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].ref_value = 0;
+ dsa.stencil[i].valuemask = 0;
+ dsa.stencil[i].writemask = 0;
+ }
+ dsa.alpha.enabled = 0;
+ dsa.alpha.func = PIPE_FUNC_ALWAYS;
+ dsa.alpha.ref_value = 0;
+ ctx->dsa = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &dsa);
+ ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
- return true;
+ return true;
}
static struct pipe_video_context *
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
- struct sp_mpeg12_context *ctx;
+ struct sp_mpeg12_context *ctx;
- assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
+ assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
- ctx = CALLOC_STRUCT(sp_mpeg12_context);
+ ctx = CALLOC_STRUCT(sp_mpeg12_context);
- if (!ctx)
- return NULL;
+ if (!ctx)
+ return NULL;
- ctx->base.profile = profile;
- ctx->base.chroma_format = chroma_format;
- ctx->base.width = width;
- ctx->base.height = height;
+ ctx->base.profile = profile;
+ ctx->base.chroma_format = chroma_format;
+ ctx->base.width = width;
+ ctx->base.height = height;
- ctx->base.screen = screen;
- ctx->base.destroy = sp_mpeg12_destroy;
- ctx->base.decode_macroblocks = sp_mpeg12_decode_macroblocks;
- ctx->base.clear_surface = sp_mpeg12_clear_surface;
- ctx->base.render_picture = sp_mpeg12_render_picture;
- ctx->base.set_decode_target = sp_mpeg12_set_decode_target;
+ ctx->base.screen = screen;
+ ctx->base.destroy = sp_mpeg12_destroy;
+ ctx->base.decode_macroblocks = sp_mpeg12_decode_macroblocks;
+ ctx->base.clear_surface = sp_mpeg12_clear_surface;
+ ctx->base.render_picture = sp_mpeg12_render_picture;
+ ctx->base.set_decode_target = sp_mpeg12_set_decode_target;
- ctx->pipe = softpipe_create(screen);
- if (!ctx->pipe)
- {
- FREE(ctx);
- return NULL;
- }
+ ctx->pipe = softpipe_create(screen);
+ if (!ctx->pipe) {
+ FREE(ctx);
+ return NULL;
+ }
- /* TODO: Use slice buffering for softpipe when implemented, no advantage to buffering an entire picture */
- if (!vl_mpeg12_mc_renderer_init(&ctx->mc_renderer, ctx->pipe,
- width, height, chroma_format,
- VL_MPEG12_MC_RENDERER_BUFFER_PICTURE,
- /* TODO: Use XFER_NONE when implemented */
- VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE,
- true))
- {
- ctx->pipe->destroy(ctx->pipe);
- FREE(ctx);
- return NULL;
- }
+ /* TODO: Use slice buffering for softpipe when implemented, no advantage to buffering an entire picture */
+ if (!vl_mpeg12_mc_renderer_init(&ctx->mc_renderer, ctx->pipe,
+ width, height, chroma_format,
+ VL_MPEG12_MC_RENDERER_BUFFER_PICTURE,
+ /* TODO: Use XFER_NONE when implemented */
+ VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE,
+ true)) {
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
- if (!vl_compositor_init(&ctx->compositor, ctx->pipe))
- {
- vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
- ctx->pipe->destroy(ctx->pipe);
- FREE(ctx);
- return NULL;
- }
+ if (!vl_compositor_init(&ctx->compositor, ctx->pipe)) {
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
- if (!init_pipe_state(ctx))
- {
- vl_compositor_cleanup(&ctx->compositor);
- vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
- ctx->pipe->destroy(ctx->pipe);
- FREE(ctx);
- return NULL;
- }
+ if (!init_pipe_state(ctx)) {
+ vl_compositor_cleanup(&ctx->compositor);
+ vl_mpeg12_mc_renderer_cleanup(&ctx->mc_renderer);
+ ctx->pipe->destroy(ctx->pipe);
+ FREE(ctx);
+ return NULL;
+ }
- return &ctx->base;
+ return &ctx->base;
}
struct pipe_video_context *
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
- assert(screen);
- assert(width && height);
+ assert(screen);
+ assert(width && height);
- switch (u_reduce_video_profile(profile))
- {
- case PIPE_VIDEO_CODEC_MPEG12:
- return sp_mpeg12_create(screen, profile,
- chroma_format,
- width, height);
- default:
- return NULL;
- }
+ switch (u_reduce_video_profile(profile)) {
+ case PIPE_VIDEO_CODEC_MPEG12:
+ return sp_mpeg12_create(screen, profile,
+ chroma_format,
+ width, height);
+ default:
+ return NULL;
+ }
}
struct sp_mpeg12_context
{
- struct pipe_video_context base;
- struct pipe_context *pipe;
- struct pipe_video_surface *decode_target;
- struct vl_mpeg12_mc_renderer mc_renderer;
- struct vl_compositor compositor;
+ struct pipe_video_context base;
+ struct pipe_context *pipe;
+ struct pipe_video_surface *decode_target;
+ struct vl_mpeg12_mc_renderer mc_renderer;
+ struct vl_compositor compositor;
- void *rast;
- void *dsa;
- void *blend;
+ void *rast;
+ void *dsa;
+ void *blend;
};
struct pipe_video_context *
*found_port = false;
- for (unsigned int i = 0; i < XScreenCount(dpy); ++i)
- {
+ for (unsigned int i = 0; i < XScreenCount(dpy); ++i) {
ret = XvQueryAdaptors(dpy, XRootWindow(dpy, i), &num_adaptors, &adaptor_info);
if (ret != Success)
return ret;
- for (unsigned int j = 0; j < num_adaptors && !*found_port; ++j)
- {
- for (unsigned int k = 0; k < adaptor_info[j].num_ports && !*found_port; ++k)
- {
+ for (unsigned int j = 0; j < num_adaptors && !*found_port; ++j) {
+ for (unsigned int k = 0; k < adaptor_info[j].num_ports && !*found_port; ++k) {
XvMCSurfaceInfo *surface_info;
if (adaptor_info[j].base_id + k != port)
*found_port = true;
surface_info = XvMCListSurfaceTypes(dpy, adaptor_info[j].base_id, &num_types);
- if (!surface_info)
- {
+ if (!surface_info) {
XvFreeAdaptorInfo(adaptor_info);
return BadAlloc;
}
- for (unsigned int l = 0; l < num_types && !found_surface; ++l)
- {
+ for (unsigned int l = 0; l < num_types && !found_surface; ++l) {
if (surface_info[l].surface_type_id != surface_type_id)
continue;
*screen = i;
}
- XFree(surface_info);
+ XFree(surface_info);
}
}
static enum pipe_video_chroma_format FormatToPipe(int xvmc_format)
{
- switch (xvmc_format)
- {
+ switch (xvmc_format) {
case XVMC_CHROMA_FORMAT_420:
return PIPE_VIDEO_CHROMA_FORMAT_420;
case XVMC_CHROMA_FORMAT_422:
/* TODO: Reuse screen if process creates another context */
screen = vl_screen_create(dpy, scrn);
- if (!screen)
- {
+ if (!screen) {
FREE(context_priv);
return BadAlloc;
}
vpipe = vl_video_create(screen, ProfileToPipe(mc_type),
FormatToPipe(chroma_format), width, height);
- if (!vpipe)
- {
+ if (!vpipe) {
screen->destroy(screen);
FREE(context_priv);
return BadAlloc;
static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
{
- switch (xvmc_pic)
- {
+ switch (xvmc_pic) {
case XVMC_TOP_FIELD:
return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
case XVMC_BOTTOM_FIELD:
static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, int xvmc_dct_type)
{
- switch (xvmc_motion_type)
- {
+ switch (xvmc_motion_type) {
case XVMC_PREDICTION_FRAME:
return xvmc_dct_type == XVMC_DCT_TYPE_FIELD ?
PIPE_MPEG12_MOTION_TYPE_16x8 : PIPE_MPEG12_MOTION_TYPE_FRAME;
assert(vpipe);
- if (*backbuffer)
- {
+ if (*backbuffer) {
if ((*backbuffer)->width != width || (*backbuffer)->height != height)
pipe_surface_reference(backbuffer, NULL);
else
xvmc_mb = xvmc_macroblocks->macro_blocks + first_macroblock;
- for (i = 0; i < num_macroblocks; ++i)
- {
+ for (i = 0; i < num_macroblocks; ++i) {
pipe_macroblocks->base.codec = PIPE_VIDEO_CODEC_MPEG12;
pipe_macroblocks->mbx = xvmc_mb->x;
pipe_macroblocks->mby = xvmc_mb->y;
vsfc = vpipe->screen->video_surface_create(vpipe->screen, vpipe->chroma_format,
vpipe->width, vpipe->height);
- if (!vsfc)
- {
+ if (!vsfc) {
FREE(surface_priv);
return BadAlloc;
}
Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
{
-#if 0
- struct vlSurface *vl_sfc;
-
- assert(dpy);
-
- if (!surface)
- return XvMCBadSurface;
+ assert(dpy);
- vl_sfc = surface->privData;
+ if (!surface)
+ return XvMCBadSurface;
- vlSurfaceFlush(vl_sfc);
-#endif
return Success;
}
Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
{
-#if 0
- struct vlSurface *vl_sfc;
-
- assert(dpy);
-
- if (!surface)
- return XvMCBadSurface;
+ assert(dpy);
- vl_sfc = surface->privData;
+ if (!surface)
+ return XvMCBadSurface;
- vlSurfaceSync(vl_sfc);
-#endif
return Success;
}
Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
{
-#if 0
- struct vlSurface *vl_sfc;
- enum vlResourceStatus res_status;
-
- assert(dpy);
-
- if (!surface)
- return XvMCBadSurface;
-
- assert(status);
-
- vl_sfc = surface->privData;
-
- vlSurfaceGetStatus(vl_sfc, &res_status);
-
- switch (res_status)
- {
- case vlResourceStatusFree:
- {
- *status = 0;
- break;
- }
- case vlResourceStatusRendering:
- {
- *status = XVMC_RENDERING;
- break;
- }
- case vlResourceStatusDisplaying:
- {
- *status = XVMC_DISPLAYING;
- break;
- }
- default:
- assert(0);
- }
-#endif
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ assert(status);
+
*status = 0;
+
return Success;
}