1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include <pipe/p_context.h>
32 #include <util/u_sampler.h>
33 #include <util/u_draw.h>
35 #include <tgsi/tgsi_ureg.h>
37 #include "vl_defines.h"
38 #include "vl_vertex_buffers.h"
/* Vertex-shader output slot indices.
 * The reference (motion-compensation) shaders use VTOP/VBOTTOM; the ycbcr
 * shaders use FLAGS/VTEX.  The two sets are never live in the same shader,
 * so FLAGS and VTEX alias the VTOP and VBOTTOM slots.
 * NOTE(review): enum opening/ordering reconstructed from the surviving
 * aliases — verify against upstream vl_mc.c. */
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTOP,
   VS_O_VBOTTOM,
   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};
52 static struct ureg_dst
53 calc_position(struct vl_mc
*r
, struct ureg_program
*shader
, struct ureg_src block_scale
)
55 struct ureg_src vrect
, vpos
;
56 struct ureg_dst t_vpos
;
57 struct ureg_dst o_vpos
;
59 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
60 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
62 t_vpos
= ureg_DECL_temporary(shader
);
64 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
67 * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
69 * t_vpos = (vpos + vrect) * block_scale
73 ureg_ADD(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
74 ureg_MUL(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
), block_scale
);
75 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
76 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), ureg_imm1f(shader
, 1.0f
));
81 static struct ureg_dst
82 calc_line(struct ureg_program
*shader
)
87 tmp
= ureg_DECL_temporary(shader
);
89 pos
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
, TGSI_INTERPOLATE_LINEAR
);
92 * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
94 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), pos
, ureg_imm1f(shader
, 0.5f
));
95 ureg_FRC(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
));
96 ureg_SGE(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
), ureg_imm1f(shader
, 0.5f
));
102 create_ref_vert_shader(struct vl_mc
*r
)
104 struct ureg_program
*shader
;
105 struct ureg_src mv_scale
;
106 struct ureg_src vmv
[2];
107 struct ureg_dst t_vpos
;
108 struct ureg_dst o_vmv
[2];
111 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
115 vmv
[0] = ureg_DECL_vs_input(shader
, VS_I_MV_TOP
);
116 vmv
[1] = ureg_DECL_vs_input(shader
, VS_I_MV_BOTTOM
);
118 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
,
119 (float)MACROBLOCK_WIDTH
/ r
->buffer_width
,
120 (float)MACROBLOCK_HEIGHT
/ r
->buffer_height
)
123 o_vmv
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
124 o_vmv
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
127 * mv_scale.xy = 0.5 / (dst.width, dst.height);
128 * mv_scale.z = 1.0f / 4.0f
129 * mv_scale.w = 1.0f / 255.0f
131 * // Apply motion vectors
132 * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
133 * o_vmv[0..1].zw = vmv[0..1] * mv_scale
137 mv_scale
= ureg_imm4f(shader
,
138 0.5f
/ r
->buffer_width
,
139 0.5f
/ r
->buffer_height
,
141 1.0f
/ PIPE_VIDEO_MV_WEIGHT_MAX
);
143 for (i
= 0; i
< 2; ++i
) {
144 ureg_MAD(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_XY
), mv_scale
, vmv
[i
], ureg_src(t_vpos
));
145 ureg_MUL(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_ZW
), mv_scale
, vmv
[i
]);
148 ureg_release_temporary(shader
, t_vpos
);
152 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
156 create_ref_frag_shader(struct vl_mc
*r
)
158 const float y_scale
=
159 r
->buffer_height
/ 2 *
160 r
->macroblock_size
/ MACROBLOCK_HEIGHT
;
162 struct ureg_program
*shader
;
163 struct ureg_src tc
[2], sampler
;
164 struct ureg_dst ref
, field
;
165 struct ureg_dst fragment
;
168 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
172 tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
173 tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
175 sampler
= ureg_DECL_sampler(shader
, 0);
176 ref
= ureg_DECL_temporary(shader
);
178 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
180 field
= calc_line(shader
);
183 * ref = field.z ? tc[1] : tc[0]
185 * // Adjust tc acording to top/bottom field selection
188 * ref.y = floor(ref.y)
192 * fragment.xyz = tex(ref, sampler[0])
194 ureg_CMP(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_XYZ
),
195 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
197 ureg_CMP(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
),
198 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
201 ureg_IF(shader
, ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
), &label
);
203 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
204 ureg_src(ref
), ureg_imm1f(shader
, y_scale
));
205 ureg_FLR(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
), ureg_src(ref
));
206 ureg_ADD(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
207 ureg_src(ref
), ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
));
208 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
209 ureg_src(ref
), ureg_imm1f(shader
, 1.0f
/ y_scale
));
211 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
214 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), TGSI_TEXTURE_2D
, ureg_src(ref
), sampler
);
216 ureg_release_temporary(shader
, ref
);
218 ureg_release_temporary(shader
, field
);
221 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
225 create_ycbcr_vert_shader(struct vl_mc
*r
, vl_mc_ycbcr_vert_shader vs_callback
, void *callback_priv
)
227 struct ureg_program
*shader
;
229 struct ureg_src vrect
, vpos
;
230 struct ureg_dst t_vpos
, t_vtex
;
231 struct ureg_dst o_vpos
, o_flags
;
233 struct vertex2f scale
= {
234 (float)BLOCK_WIDTH
/ r
->buffer_width
* MACROBLOCK_WIDTH
/ r
->macroblock_size
,
235 (float)BLOCK_HEIGHT
/ r
->buffer_height
* MACROBLOCK_HEIGHT
/ r
->macroblock_size
240 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
244 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
245 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
247 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
, scale
.x
, scale
.y
));
248 t_vtex
= ureg_DECL_temporary(shader
);
250 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
251 o_flags
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
);
255 * o_flags.z = intra * 0.5
258 * t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y : 0 }
259 * t_vtex.z = vpos.y % 2
260 * t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
261 * o_vpos.y = t_vtex.y + t_vpos.y
263 * o_flags.w = t_vtex.z ? 0 : 1
268 vs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, t_vpos
);
270 ureg_MUL(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_Z
),
271 ureg_scalar(vpos
, TGSI_SWIZZLE_Z
), ureg_imm1f(shader
, 0.5f
));
272 ureg_MOV(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, -1.0f
));
274 if (r
->macroblock_size
== MACROBLOCK_HEIGHT
) { //TODO
275 ureg_IF(shader
, ureg_scalar(vpos
, TGSI_SWIZZLE_W
), &label
);
277 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_XY
),
278 ureg_negate(ureg_scalar(vrect
, TGSI_SWIZZLE_Y
)),
279 ureg_imm2f(shader
, 0.0f
, scale
.y
),
280 ureg_imm2f(shader
, -scale
.y
, 0.0f
));
281 ureg_MUL(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
),
282 ureg_scalar(vpos
, TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.5f
));
284 ureg_FRC(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
), ureg_src(t_vtex
));
286 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Y
),
287 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
288 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_X
),
289 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Y
));
290 ureg_ADD(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_Y
),
291 ureg_src(t_vpos
), ureg_src(t_vtex
));
293 ureg_CMP(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
),
294 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
295 ureg_imm1f(shader
, 0.0f
), ureg_imm1f(shader
, 1.0f
));
297 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
301 ureg_release_temporary(shader
, t_vtex
);
302 ureg_release_temporary(shader
, t_vpos
);
306 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
310 create_ycbcr_frag_shader(struct vl_mc
*r
, float scale
, bool invert
,
311 vl_mc_ycbcr_frag_shader fs_callback
, void *callback_priv
)
313 struct ureg_program
*shader
;
314 struct ureg_src flags
;
316 struct ureg_dst fragment
;
319 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
323 flags
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
, TGSI_INTERPOLATE_LINEAR
);
325 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
327 tmp
= calc_line(shader
);
333 * fragment.xyz = tex(tc, sampler) * scale + tc.z
338 ureg_SEQ(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
339 ureg_scalar(flags
, TGSI_SWIZZLE_W
), ureg_src(tmp
));
341 ureg_IF(shader
, ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), &label
);
345 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
346 ureg_ELSE(shader
, &label
);
348 fs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, tmp
);
351 ureg_MAD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
352 ureg_src(tmp
), ureg_imm1f(shader
, scale
),
353 ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
355 ureg_ADD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
356 ureg_src(tmp
), ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
358 ureg_MUL(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), ureg_src(tmp
), ureg_imm1f(shader
, invert
? -1.0f
: 1.0f
));
359 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
361 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
364 ureg_release_temporary(shader
, tmp
);
368 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
372 init_pipe_state(struct vl_mc
*r
)
374 struct pipe_sampler_state sampler
;
375 struct pipe_blend_state blend
;
376 struct pipe_rasterizer_state rs_state
;
381 memset(&sampler
, 0, sizeof(sampler
));
382 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
383 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
384 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_BORDER
;
385 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
386 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
387 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
388 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
389 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
390 sampler
.normalized_coords
= 1;
391 r
->sampler_ref
= r
->pipe
->create_sampler_state(r
->pipe
, &sampler
);
393 goto error_sampler_ref
;
395 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
396 memset(&blend
, 0, sizeof blend
);
397 blend
.independent_blend_enable
= 0;
398 blend
.rt
[0].blend_enable
= 1;
399 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
400 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
401 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
402 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
403 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
404 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
405 blend
.logicop_enable
= 0;
406 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
407 blend
.rt
[0].colormask
= i
;
409 r
->blend_clear
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
410 if (!r
->blend_clear
[i
])
413 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
414 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
415 r
->blend_add
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
416 if (!r
->blend_add
[i
])
419 blend
.rt
[0].rgb_func
= PIPE_BLEND_REVERSE_SUBTRACT
;
420 blend
.rt
[0].alpha_dst_factor
= PIPE_BLEND_REVERSE_SUBTRACT
;
421 r
->blend_sub
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
422 if (!r
->blend_sub
[i
])
426 memset(&rs_state
, 0, sizeof(rs_state
));
427 /*rs_state.sprite_coord_enable */
428 rs_state
.sprite_coord_mode
= PIPE_SPRITE_COORD_UPPER_LEFT
;
429 rs_state
.point_quad_rasterization
= true;
430 rs_state
.point_size
= BLOCK_WIDTH
;
431 rs_state
.gl_rasterization_rules
= true;
432 r
->rs_state
= r
->pipe
->create_rasterizer_state(r
->pipe
, &rs_state
);
440 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
442 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
445 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
447 if (r
->blend_clear
[i
])
448 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
451 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
458 cleanup_pipe_state(struct vl_mc
*r
)
464 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
465 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
466 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
467 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
468 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
470 r
->pipe
->delete_rasterizer_state(r
->pipe
, r
->rs_state
);
474 vl_mc_init(struct vl_mc
*renderer
, struct pipe_context
*pipe
,
475 unsigned buffer_width
, unsigned buffer_height
,
476 unsigned macroblock_size
, float scale
,
477 vl_mc_ycbcr_vert_shader vs_callback
,
478 vl_mc_ycbcr_frag_shader fs_callback
,
484 memset(renderer
, 0, sizeof(struct vl_mc
));
486 renderer
->pipe
= pipe
;
487 renderer
->buffer_width
= buffer_width
;
488 renderer
->buffer_height
= buffer_height
;
489 renderer
->macroblock_size
= macroblock_size
;
491 if (!init_pipe_state(renderer
))
492 goto error_pipe_state
;
494 renderer
->vs_ref
= create_ref_vert_shader(renderer
);
495 if (!renderer
->vs_ref
)
498 renderer
->vs_ycbcr
= create_ycbcr_vert_shader(renderer
, vs_callback
, callback_priv
);
499 if (!renderer
->vs_ycbcr
)
502 renderer
->fs_ref
= create_ref_frag_shader(renderer
);
503 if (!renderer
->fs_ref
)
506 renderer
->fs_ycbcr
= create_ycbcr_frag_shader(renderer
, scale
, false, fs_callback
, callback_priv
);
507 if (!renderer
->fs_ycbcr
)
510 renderer
->fs_ycbcr_sub
= create_ycbcr_frag_shader(renderer
, scale
, true, fs_callback
, callback_priv
);
511 if (!renderer
->fs_ycbcr_sub
)
512 goto error_fs_ycbcr_sub
;
517 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
520 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
523 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
526 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
529 cleanup_pipe_state(renderer
);
536 vl_mc_cleanup(struct vl_mc
*renderer
)
540 cleanup_pipe_state(renderer
);
542 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
543 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
544 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
545 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
546 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
550 vl_mc_init_buffer(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
)
552 assert(renderer
&& buffer
);
554 buffer
->renderer
= renderer
;
556 buffer
->viewport
.scale
[2] = 1;
557 buffer
->viewport
.scale
[3] = 1;
558 buffer
->viewport
.translate
[0] = 0;
559 buffer
->viewport
.translate
[1] = 0;
560 buffer
->viewport
.translate
[2] = 0;
561 buffer
->viewport
.translate
[3] = 0;
563 buffer
->fb_state
.nr_cbufs
= 1;
564 buffer
->fb_state
.zsbuf
= NULL
;
/**
 * Release per-buffer state.  The buffer owns no heap/CSO resources of its
 * own, so this only validates the pointer.
 * NOTE(review): body restored — only the signature survived extraction;
 * verify against upstream that nothing else was done here.
 */
void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}
576 vl_mc_set_surface(struct vl_mc_buffer
*buffer
, struct pipe_surface
*surface
)
578 assert(buffer
&& surface
);
580 buffer
->surface_cleared
= false;
582 buffer
->viewport
.scale
[0] = surface
->width
;
583 buffer
->viewport
.scale
[1] = surface
->height
;
585 buffer
->fb_state
.width
= surface
->width
;
586 buffer
->fb_state
.height
= surface
->height
;
587 buffer
->fb_state
.cbufs
[0] = surface
;
591 prepare_pipe_4_rendering(struct vl_mc_buffer
*buffer
, unsigned mask
)
593 struct vl_mc
*renderer
;
597 renderer
= buffer
->renderer
;
598 renderer
->pipe
->bind_rasterizer_state(renderer
->pipe
, renderer
->rs_state
);
600 if (buffer
->surface_cleared
)
601 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_add
[mask
]);
603 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_clear
[mask
]);
605 renderer
->pipe
->set_framebuffer_state(renderer
->pipe
, &buffer
->fb_state
);
606 renderer
->pipe
->set_viewport_state(renderer
->pipe
, &buffer
->viewport
);
610 vl_mc_render_ref(struct vl_mc_buffer
*buffer
, struct pipe_sampler_view
*ref
)
612 struct vl_mc
*renderer
;
614 assert(buffer
&& ref
);
616 prepare_pipe_4_rendering(buffer
, PIPE_MASK_R
| PIPE_MASK_G
| PIPE_MASK_B
);
618 renderer
= buffer
->renderer
;
620 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ref
);
621 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ref
);
623 renderer
->pipe
->set_fragment_sampler_views(renderer
->pipe
, 1, &ref
);
624 renderer
->pipe
->bind_fragment_sampler_states(renderer
->pipe
, 1, &renderer
->sampler_ref
);
626 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0,
627 renderer
->buffer_width
/ MACROBLOCK_WIDTH
*
628 renderer
->buffer_height
/ MACROBLOCK_HEIGHT
);
630 buffer
->surface_cleared
= true;
634 vl_mc_render_ycbcr(struct vl_mc_buffer
*buffer
, unsigned component
, unsigned num_instances
)
636 struct vl_mc
*renderer
;
637 unsigned mask
= 1 << component
;
641 if (num_instances
== 0)
644 prepare_pipe_4_rendering(buffer
, mask
);
646 renderer
= buffer
->renderer
;
648 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
649 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
651 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);
653 if (buffer
->surface_cleared
) {
654 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_sub
[mask
]);
655 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
656 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);