1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include <pipe/p_context.h>
32 #include <util/u_sampler.h>
33 #include <util/u_draw.h>
35 #include <tgsi/tgsi_ureg.h>
37 #include "vl_defines.h"
38 #include "vl_vertex_buffers.h"
/* Vertex shader output slots.
 *
 * The reference (motion-compensation) shader pair uses VS_O_VTOP /
 * VS_O_VBOTTOM as generic outputs for the two motion vectors; the ycbcr
 * shader pair reuses the same generic slots under the names VS_O_FLAGS /
 * VS_O_VTEX. The two shader pairs are never bound at the same time, so
 * the aliasing is safe.
 */
enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_VTOP,
   VS_O_VBOTTOM,
   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};
52 static struct ureg_dst
53 calc_position(struct vl_mc
*r
, struct ureg_program
*shader
, struct ureg_src block_scale
)
55 struct ureg_src vrect
, vpos
;
56 struct ureg_dst t_vpos
;
57 struct ureg_dst o_vpos
;
59 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
60 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
62 t_vpos
= ureg_DECL_temporary(shader
);
64 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
67 * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
69 * t_vpos = (vpos + vrect) * block_scale
73 ureg_ADD(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
74 ureg_MUL(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
), block_scale
);
75 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
76 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), ureg_imm1f(shader
, 1.0f
));
81 static struct ureg_dst
82 calc_line(struct ureg_program
*shader
)
87 tmp
= ureg_DECL_temporary(shader
);
89 pos
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
, TGSI_INTERPOLATE_LINEAR
);
92 * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
94 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), pos
, ureg_imm1f(shader
, 0.5f
));
95 ureg_FRC(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
));
96 ureg_SGE(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
), ureg_imm1f(shader
, 0.5f
));
102 create_ref_vert_shader(struct vl_mc
*r
)
104 struct ureg_program
*shader
;
105 struct ureg_src mv_scale
;
106 struct ureg_src vmv
[2];
107 struct ureg_dst t_vpos
;
108 struct ureg_dst o_vpos
, o_vmv
[2];
111 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
115 vmv
[0] = ureg_DECL_vs_input(shader
, VS_I_MV_TOP
);
116 vmv
[1] = ureg_DECL_vs_input(shader
, VS_I_MV_BOTTOM
);
118 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
,
119 (float)MACROBLOCK_WIDTH
/ r
->buffer_width
,
120 (float)MACROBLOCK_HEIGHT
/ r
->buffer_height
)
123 /* XXX The position is not written, which may lead to undefined rendering.
124 * XXX This is a serious bug. */
125 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
126 o_vmv
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
127 o_vmv
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
130 * mv_scale.xy = 0.5 / (dst.width, dst.height);
131 * mv_scale.z = 1.0f / 4.0f
132 * mv_scale.w = 1.0f / 255.0f
134 * // Apply motion vectors
135 * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
136 * o_vmv[0..1].zw = vmv[0..1] * mv_scale
140 mv_scale
= ureg_imm4f(shader
,
141 0.5f
/ r
->buffer_width
,
142 0.5f
/ r
->buffer_height
,
144 1.0f
/ PIPE_VIDEO_MV_WEIGHT_MAX
);
146 for (i
= 0; i
< 2; ++i
) {
147 ureg_MAD(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_XY
), mv_scale
, vmv
[i
], ureg_src(t_vpos
));
148 ureg_MUL(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_ZW
), mv_scale
, vmv
[i
]);
151 ureg_release_temporary(shader
, t_vpos
);
155 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
159 create_ref_frag_shader(struct vl_mc
*r
)
161 const float y_scale
=
162 r
->buffer_height
/ 2 *
163 r
->macroblock_size
/ MACROBLOCK_HEIGHT
;
165 struct ureg_program
*shader
;
166 struct ureg_src tc
[2], sampler
;
167 struct ureg_dst ref
, field
;
168 struct ureg_dst fragment
;
171 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
175 tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
176 tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
178 sampler
= ureg_DECL_sampler(shader
, 0);
179 ref
= ureg_DECL_temporary(shader
);
181 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
183 field
= calc_line(shader
);
186 * ref = field.z ? tc[1] : tc[0]
188 * // Adjust tc acording to top/bottom field selection
191 * ref.y = floor(ref.y)
195 * fragment.xyz = tex(ref, sampler[0])
197 ureg_CMP(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_XYZ
),
198 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
200 ureg_CMP(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
),
201 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
204 ureg_IF(shader
, ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
), &label
);
206 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
207 ureg_src(ref
), ureg_imm1f(shader
, y_scale
));
208 ureg_FLR(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
), ureg_src(ref
));
209 ureg_ADD(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
210 ureg_src(ref
), ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
));
211 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
212 ureg_src(ref
), ureg_imm1f(shader
, 1.0f
/ y_scale
));
214 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
217 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), TGSI_TEXTURE_2D
, ureg_src(ref
), sampler
);
219 ureg_release_temporary(shader
, ref
);
221 ureg_release_temporary(shader
, field
);
224 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
228 create_ycbcr_vert_shader(struct vl_mc
*r
, vl_mc_ycbcr_vert_shader vs_callback
, void *callback_priv
)
230 struct ureg_program
*shader
;
232 struct ureg_src vrect
, vpos
;
233 struct ureg_dst t_vpos
, t_vtex
;
234 struct ureg_dst o_vpos
, o_flags
;
236 struct vertex2f scale
= {
237 (float)BLOCK_WIDTH
/ r
->buffer_width
* MACROBLOCK_WIDTH
/ r
->macroblock_size
,
238 (float)BLOCK_HEIGHT
/ r
->buffer_height
* MACROBLOCK_HEIGHT
/ r
->macroblock_size
243 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
247 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
248 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
250 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
, scale
.x
, scale
.y
));
251 t_vtex
= ureg_DECL_temporary(shader
);
253 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
254 o_flags
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
);
258 * o_flags.z = intra * 0.5
261 * t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y : 0 }
262 * t_vtex.z = vpos.y % 2
263 * t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
264 * o_vpos.y = t_vtex.y + t_vpos.y
266 * o_flags.w = t_vtex.z ? 0 : 1
271 vs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, t_vpos
);
273 ureg_MUL(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_Z
),
274 ureg_scalar(vpos
, TGSI_SWIZZLE_Z
), ureg_imm1f(shader
, 0.5f
));
275 ureg_MOV(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, -1.0f
));
277 if (r
->macroblock_size
== MACROBLOCK_HEIGHT
) { //TODO
278 ureg_IF(shader
, ureg_scalar(vpos
, TGSI_SWIZZLE_W
), &label
);
280 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_XY
),
281 ureg_negate(ureg_scalar(vrect
, TGSI_SWIZZLE_Y
)),
282 ureg_imm2f(shader
, 0.0f
, scale
.y
),
283 ureg_imm2f(shader
, -scale
.y
, 0.0f
));
284 ureg_MUL(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
),
285 ureg_scalar(vpos
, TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.5f
));
287 ureg_FRC(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
), ureg_src(t_vtex
));
289 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Y
),
290 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
291 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_X
),
292 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Y
));
293 ureg_ADD(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_Y
),
294 ureg_src(t_vpos
), ureg_src(t_vtex
));
296 ureg_CMP(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
),
297 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
298 ureg_imm1f(shader
, 0.0f
), ureg_imm1f(shader
, 1.0f
));
300 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
304 ureg_release_temporary(shader
, t_vtex
);
305 ureg_release_temporary(shader
, t_vpos
);
309 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
313 create_ycbcr_frag_shader(struct vl_mc
*r
, float scale
, bool invert
,
314 vl_mc_ycbcr_frag_shader fs_callback
, void *callback_priv
)
316 struct ureg_program
*shader
;
317 struct ureg_src flags
;
319 struct ureg_dst fragment
;
322 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
326 flags
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
, TGSI_INTERPOLATE_LINEAR
);
328 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
330 tmp
= calc_line(shader
);
336 * fragment.xyz = tex(tc, sampler) * scale + tc.z
341 ureg_SEQ(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
342 ureg_scalar(flags
, TGSI_SWIZZLE_W
), ureg_src(tmp
));
344 ureg_IF(shader
, ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), &label
);
348 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
349 ureg_ELSE(shader
, &label
);
351 fs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, tmp
);
354 ureg_MAD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
355 ureg_src(tmp
), ureg_imm1f(shader
, scale
),
356 ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
358 ureg_ADD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
359 ureg_src(tmp
), ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
361 ureg_MUL(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), ureg_src(tmp
), ureg_imm1f(shader
, invert
? -1.0f
: 1.0f
));
362 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
364 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
367 ureg_release_temporary(shader
, tmp
);
371 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
375 init_pipe_state(struct vl_mc
*r
)
377 struct pipe_sampler_state sampler
;
378 struct pipe_blend_state blend
;
379 struct pipe_rasterizer_state rs_state
;
384 memset(&sampler
, 0, sizeof(sampler
));
385 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
386 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
387 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_BORDER
;
388 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
389 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
390 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
391 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
392 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
393 sampler
.normalized_coords
= 1;
394 r
->sampler_ref
= r
->pipe
->create_sampler_state(r
->pipe
, &sampler
);
396 goto error_sampler_ref
;
398 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
399 memset(&blend
, 0, sizeof blend
);
400 blend
.independent_blend_enable
= 0;
401 blend
.rt
[0].blend_enable
= 1;
402 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
403 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
404 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
405 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
406 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
407 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
408 blend
.logicop_enable
= 0;
409 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
410 blend
.rt
[0].colormask
= i
;
412 r
->blend_clear
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
413 if (!r
->blend_clear
[i
])
416 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
417 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
418 r
->blend_add
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
419 if (!r
->blend_add
[i
])
422 blend
.rt
[0].rgb_func
= PIPE_BLEND_REVERSE_SUBTRACT
;
423 blend
.rt
[0].alpha_dst_factor
= PIPE_BLEND_REVERSE_SUBTRACT
;
424 r
->blend_sub
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
425 if (!r
->blend_sub
[i
])
429 memset(&rs_state
, 0, sizeof(rs_state
));
430 /*rs_state.sprite_coord_enable */
431 rs_state
.sprite_coord_mode
= PIPE_SPRITE_COORD_UPPER_LEFT
;
432 rs_state
.point_quad_rasterization
= true;
433 rs_state
.point_size
= BLOCK_WIDTH
;
434 rs_state
.gl_rasterization_rules
= true;
435 r
->rs_state
= r
->pipe
->create_rasterizer_state(r
->pipe
, &rs_state
);
443 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
445 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
448 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
450 if (r
->blend_clear
[i
])
451 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
454 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
461 cleanup_pipe_state(struct vl_mc
*r
)
467 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
468 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
469 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
470 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
471 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
473 r
->pipe
->delete_rasterizer_state(r
->pipe
, r
->rs_state
);
477 vl_mc_init(struct vl_mc
*renderer
, struct pipe_context
*pipe
,
478 unsigned buffer_width
, unsigned buffer_height
,
479 unsigned macroblock_size
, float scale
,
480 vl_mc_ycbcr_vert_shader vs_callback
,
481 vl_mc_ycbcr_frag_shader fs_callback
,
487 memset(renderer
, 0, sizeof(struct vl_mc
));
489 renderer
->pipe
= pipe
;
490 renderer
->buffer_width
= buffer_width
;
491 renderer
->buffer_height
= buffer_height
;
492 renderer
->macroblock_size
= macroblock_size
;
494 if (!init_pipe_state(renderer
))
495 goto error_pipe_state
;
497 renderer
->vs_ref
= create_ref_vert_shader(renderer
);
498 if (!renderer
->vs_ref
)
501 renderer
->vs_ycbcr
= create_ycbcr_vert_shader(renderer
, vs_callback
, callback_priv
);
502 if (!renderer
->vs_ycbcr
)
505 renderer
->fs_ref
= create_ref_frag_shader(renderer
);
506 if (!renderer
->fs_ref
)
509 renderer
->fs_ycbcr
= create_ycbcr_frag_shader(renderer
, scale
, false, fs_callback
, callback_priv
);
510 if (!renderer
->fs_ycbcr
)
513 renderer
->fs_ycbcr_sub
= create_ycbcr_frag_shader(renderer
, scale
, true, fs_callback
, callback_priv
);
514 if (!renderer
->fs_ycbcr_sub
)
515 goto error_fs_ycbcr_sub
;
520 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
523 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
526 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
529 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
532 cleanup_pipe_state(renderer
);
539 vl_mc_cleanup(struct vl_mc
*renderer
)
543 cleanup_pipe_state(renderer
);
545 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
546 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
547 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
548 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
549 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
553 vl_mc_init_buffer(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
)
555 assert(renderer
&& buffer
);
557 buffer
->renderer
= renderer
;
559 buffer
->viewport
.scale
[2] = 1;
560 buffer
->viewport
.scale
[3] = 1;
561 buffer
->viewport
.translate
[0] = 0;
562 buffer
->viewport
.translate
[1] = 0;
563 buffer
->viewport
.translate
[2] = 0;
564 buffer
->viewport
.translate
[3] = 0;
566 buffer
->fb_state
.nr_cbufs
= 1;
567 buffer
->fb_state
.zsbuf
= NULL
;
/* Nothing per-buffer is owned, so this only validates the argument. */
void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}
579 vl_mc_set_surface(struct vl_mc_buffer
*buffer
, struct pipe_surface
*surface
)
581 assert(buffer
&& surface
);
583 buffer
->surface_cleared
= false;
585 buffer
->viewport
.scale
[0] = surface
->width
;
586 buffer
->viewport
.scale
[1] = surface
->height
;
588 buffer
->fb_state
.width
= surface
->width
;
589 buffer
->fb_state
.height
= surface
->height
;
590 buffer
->fb_state
.cbufs
[0] = surface
;
594 prepare_pipe_4_rendering(struct vl_mc_buffer
*buffer
, unsigned component
, unsigned mask
)
596 struct vl_mc
*renderer
;
600 renderer
= buffer
->renderer
;
601 renderer
->pipe
->bind_rasterizer_state(renderer
->pipe
, renderer
->rs_state
);
603 if (buffer
->surface_cleared
|| component
> 0)
604 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_add
[mask
]);
606 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_clear
[mask
]);
608 renderer
->pipe
->set_framebuffer_state(renderer
->pipe
, &buffer
->fb_state
);
609 renderer
->pipe
->set_viewport_state(renderer
->pipe
, &buffer
->viewport
);
613 vl_mc_render_ref(struct vl_mc_buffer
*buffer
, struct pipe_sampler_view
*ref
)
615 struct vl_mc
*renderer
;
617 assert(buffer
&& ref
);
619 prepare_pipe_4_rendering(buffer
, 0, PIPE_MASK_R
| PIPE_MASK_G
| PIPE_MASK_B
);
621 renderer
= buffer
->renderer
;
623 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ref
);
624 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ref
);
626 renderer
->pipe
->set_fragment_sampler_views(renderer
->pipe
, 1, &ref
);
627 renderer
->pipe
->bind_fragment_sampler_states(renderer
->pipe
, 1, &renderer
->sampler_ref
);
629 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0,
630 renderer
->buffer_width
/ MACROBLOCK_WIDTH
*
631 renderer
->buffer_height
/ MACROBLOCK_HEIGHT
);
633 buffer
->surface_cleared
= true;
637 vl_mc_render_ycbcr(struct vl_mc_buffer
*buffer
, unsigned component
, unsigned num_instances
)
639 struct vl_mc
*renderer
;
640 unsigned mask
= 1 << component
;
644 if (num_instances
== 0)
647 prepare_pipe_4_rendering(buffer
, component
, mask
);
649 renderer
= buffer
->renderer
;
651 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
652 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
654 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);
656 if (buffer
->surface_cleared
) {
657 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_sub
[mask
]);
658 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
659 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);