1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "pipe/p_context.h"
32 #include "util/u_sampler.h"
33 #include "util/u_draw.h"
35 #include "tgsi/tgsi_ureg.h"
37 #include "vl_defines.h"
38 #include "vl_vertex_buffers.h"
48 VS_O_FLAGS
= VS_O_VTOP
,
49 VS_O_VTEX
= VS_O_VBOTTOM
52 static struct ureg_dst
53 calc_position(struct vl_mc
*r
, struct ureg_program
*shader
, struct ureg_src block_scale
)
55 struct ureg_src vrect
, vpos
;
56 struct ureg_dst t_vpos
;
57 struct ureg_dst o_vpos
;
59 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
60 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
62 t_vpos
= ureg_DECL_temporary(shader
);
64 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
67 * block_scale = (VL_MACROBLOCK_WIDTH, VL_MACROBLOCK_HEIGHT) / (dst.width, dst.height)
69 * t_vpos = (vpos + vrect) * block_scale
73 ureg_ADD(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
74 ureg_MUL(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
), block_scale
);
75 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
76 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), ureg_imm1f(shader
, 1.0f
));
81 static struct ureg_dst
82 calc_line(struct ureg_program
*shader
)
87 tmp
= ureg_DECL_temporary(shader
);
89 pos
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
, TGSI_INTERPOLATE_LINEAR
);
92 * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
94 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), pos
, ureg_imm1f(shader
, 0.5f
));
95 ureg_FRC(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
));
96 ureg_SGE(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
), ureg_imm1f(shader
, 0.5f
));
102 create_ref_vert_shader(struct vl_mc
*r
)
104 struct ureg_program
*shader
;
105 struct ureg_src mv_scale
;
106 struct ureg_src vmv
[2];
107 struct ureg_dst t_vpos
;
108 struct ureg_dst o_vmv
[2];
111 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
115 vmv
[0] = ureg_DECL_vs_input(shader
, VS_I_MV_TOP
);
116 vmv
[1] = ureg_DECL_vs_input(shader
, VS_I_MV_BOTTOM
);
118 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
,
119 (float)VL_MACROBLOCK_WIDTH
/ r
->buffer_width
,
120 (float)VL_MACROBLOCK_HEIGHT
/ r
->buffer_height
)
123 o_vmv
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
124 o_vmv
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
127 * mv_scale.xy = 0.5 / (dst.width, dst.height);
128 * mv_scale.z = 1.0f / 4.0f
129 * mv_scale.w = 1.0f / 255.0f
131 * // Apply motion vectors
132 * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
133 * o_vmv[0..1].zw = vmv[0..1] * mv_scale
137 mv_scale
= ureg_imm4f(shader
,
138 0.5f
/ r
->buffer_width
,
139 0.5f
/ r
->buffer_height
,
141 1.0f
/ PIPE_VIDEO_MV_WEIGHT_MAX
);
143 for (i
= 0; i
< 2; ++i
) {
144 ureg_MAD(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_XY
), mv_scale
, vmv
[i
], ureg_src(t_vpos
));
145 ureg_MUL(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_ZW
), mv_scale
, vmv
[i
]);
148 ureg_release_temporary(shader
, t_vpos
);
152 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
156 create_ref_frag_shader(struct vl_mc
*r
)
158 const float y_scale
=
159 r
->buffer_height
/ 2 *
160 r
->macroblock_size
/ VL_MACROBLOCK_HEIGHT
;
162 struct ureg_program
*shader
;
163 struct ureg_src tc
[2], sampler
;
164 struct ureg_dst ref
, field
;
165 struct ureg_dst fragment
;
168 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
172 tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
173 tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
175 sampler
= ureg_DECL_sampler(shader
, 0);
176 ref
= ureg_DECL_temporary(shader
);
178 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
180 field
= calc_line(shader
);
183 * ref = field.z ? tc[1] : tc[0]
185 * // Adjust tc acording to top/bottom field selection
188 * ref.y = floor(ref.y)
192 * fragment.xyz = tex(ref, sampler[0])
194 ureg_CMP(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_XYZ
),
195 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
197 ureg_CMP(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
),
198 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
201 ureg_IF(shader
, ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
), &label
);
203 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
204 ureg_src(ref
), ureg_imm1f(shader
, y_scale
));
205 ureg_FLR(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
), ureg_src(ref
));
206 ureg_ADD(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
207 ureg_src(ref
), ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
));
208 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
209 ureg_src(ref
), ureg_imm1f(shader
, 1.0f
/ y_scale
));
211 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
214 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), TGSI_TEXTURE_2D
, ureg_src(ref
), sampler
);
216 ureg_release_temporary(shader
, ref
);
218 ureg_release_temporary(shader
, field
);
221 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
225 create_ycbcr_vert_shader(struct vl_mc
*r
, vl_mc_ycbcr_vert_shader vs_callback
, void *callback_priv
)
227 struct ureg_program
*shader
;
229 struct ureg_src vrect
, vpos
;
230 struct ureg_dst t_vpos
, t_vtex
;
231 struct ureg_dst o_vpos
, o_flags
;
233 struct vertex2f scale
= {
234 (float)VL_BLOCK_WIDTH
/ r
->buffer_width
* VL_MACROBLOCK_WIDTH
/ r
->macroblock_size
,
235 (float)VL_BLOCK_HEIGHT
/ r
->buffer_height
* VL_MACROBLOCK_HEIGHT
/ r
->macroblock_size
240 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
244 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
245 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
247 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
, scale
.x
, scale
.y
));
248 t_vtex
= ureg_DECL_temporary(shader
);
250 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
251 o_flags
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
);
255 * o_flags.z = intra * 0.5
258 * t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y : 0 }
259 * t_vtex.z = vpos.y % 2
260 * t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
261 * o_vpos.y = t_vtex.y + t_vpos.y
263 * o_flags.w = t_vtex.z ? 0 : 1
268 vs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, t_vpos
);
270 ureg_MUL(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_Z
),
271 ureg_scalar(vpos
, TGSI_SWIZZLE_Z
), ureg_imm1f(shader
, 0.5f
));
272 ureg_MOV(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, -1.0f
));
274 if (r
->macroblock_size
== VL_MACROBLOCK_HEIGHT
) { //TODO
275 ureg_IF(shader
, ureg_scalar(vpos
, TGSI_SWIZZLE_W
), &label
);
277 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_XY
),
278 ureg_negate(ureg_scalar(vrect
, TGSI_SWIZZLE_Y
)),
279 ureg_imm2f(shader
, 0.0f
, scale
.y
),
280 ureg_imm2f(shader
, -scale
.y
, 0.0f
));
281 ureg_MUL(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
),
282 ureg_scalar(vpos
, TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.5f
));
284 ureg_FRC(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
), ureg_src(t_vtex
));
286 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Y
),
287 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
288 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_X
),
289 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Y
));
290 ureg_ADD(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_Y
),
291 ureg_src(t_vpos
), ureg_src(t_vtex
));
293 ureg_CMP(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
),
294 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
295 ureg_imm1f(shader
, 0.0f
), ureg_imm1f(shader
, 1.0f
));
297 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
301 ureg_release_temporary(shader
, t_vtex
);
302 ureg_release_temporary(shader
, t_vpos
);
306 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
310 create_ycbcr_frag_shader(struct vl_mc
*r
, float scale
, bool invert
,
311 vl_mc_ycbcr_frag_shader fs_callback
, void *callback_priv
)
313 struct ureg_program
*shader
;
314 struct ureg_src flags
;
316 struct ureg_dst fragment
;
319 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
323 flags
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
, TGSI_INTERPOLATE_LINEAR
);
325 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
327 tmp
= calc_line(shader
);
333 * fragment.xyz = tex(tc, sampler) * scale + tc.z
338 ureg_SEQ(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
339 ureg_scalar(flags
, TGSI_SWIZZLE_W
), ureg_src(tmp
));
341 ureg_IF(shader
, ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), &label
);
345 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
346 ureg_ELSE(shader
, &label
);
348 fs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, tmp
);
351 ureg_MAD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
352 ureg_src(tmp
), ureg_imm1f(shader
, scale
),
353 ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
355 ureg_ADD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
356 ureg_src(tmp
), ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
358 ureg_MUL(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), ureg_src(tmp
), ureg_imm1f(shader
, invert
? -1.0f
: 1.0f
));
359 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
361 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
364 ureg_release_temporary(shader
, tmp
);
368 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
372 init_pipe_state(struct vl_mc
*r
)
374 struct pipe_sampler_state sampler
;
375 struct pipe_blend_state blend
;
376 struct pipe_rasterizer_state rs_state
;
381 memset(&sampler
, 0, sizeof(sampler
));
382 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
383 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
384 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_BORDER
;
385 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
386 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
387 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
388 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
389 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
390 sampler
.normalized_coords
= 1;
391 r
->sampler_ref
= r
->pipe
->create_sampler_state(r
->pipe
, &sampler
);
393 goto error_sampler_ref
;
395 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
396 memset(&blend
, 0, sizeof blend
);
397 blend
.independent_blend_enable
= 0;
398 blend
.rt
[0].blend_enable
= 1;
399 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
400 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
401 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
402 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
403 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
404 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
405 blend
.logicop_enable
= 0;
406 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
407 blend
.rt
[0].colormask
= i
;
409 r
->blend_clear
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
410 if (!r
->blend_clear
[i
])
413 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
414 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
415 r
->blend_add
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
416 if (!r
->blend_add
[i
])
419 blend
.rt
[0].rgb_func
= PIPE_BLEND_REVERSE_SUBTRACT
;
420 blend
.rt
[0].alpha_dst_factor
= PIPE_BLEND_REVERSE_SUBTRACT
;
421 r
->blend_sub
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
422 if (!r
->blend_sub
[i
])
426 memset(&rs_state
, 0, sizeof(rs_state
));
427 /*rs_state.sprite_coord_enable */
428 rs_state
.sprite_coord_mode
= PIPE_SPRITE_COORD_UPPER_LEFT
;
429 rs_state
.point_quad_rasterization
= true;
430 rs_state
.point_size
= VL_BLOCK_WIDTH
;
431 rs_state
.half_pixel_center
= true;
432 rs_state
.bottom_edge_rule
= true;
433 rs_state
.depth_clip
= 1;
434 r
->rs_state
= r
->pipe
->create_rasterizer_state(r
->pipe
, &rs_state
);
442 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
444 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
447 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
449 if (r
->blend_clear
[i
])
450 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
453 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
460 cleanup_pipe_state(struct vl_mc
*r
)
466 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
467 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
468 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
469 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
470 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
472 r
->pipe
->delete_rasterizer_state(r
->pipe
, r
->rs_state
);
476 vl_mc_init(struct vl_mc
*renderer
, struct pipe_context
*pipe
,
477 unsigned buffer_width
, unsigned buffer_height
,
478 unsigned macroblock_size
, float scale
,
479 vl_mc_ycbcr_vert_shader vs_callback
,
480 vl_mc_ycbcr_frag_shader fs_callback
,
486 memset(renderer
, 0, sizeof(struct vl_mc
));
488 renderer
->pipe
= pipe
;
489 renderer
->buffer_width
= buffer_width
;
490 renderer
->buffer_height
= buffer_height
;
491 renderer
->macroblock_size
= macroblock_size
;
493 if (!init_pipe_state(renderer
))
494 goto error_pipe_state
;
496 renderer
->vs_ref
= create_ref_vert_shader(renderer
);
497 if (!renderer
->vs_ref
)
500 renderer
->vs_ycbcr
= create_ycbcr_vert_shader(renderer
, vs_callback
, callback_priv
);
501 if (!renderer
->vs_ycbcr
)
504 renderer
->fs_ref
= create_ref_frag_shader(renderer
);
505 if (!renderer
->fs_ref
)
508 renderer
->fs_ycbcr
= create_ycbcr_frag_shader(renderer
, scale
, false, fs_callback
, callback_priv
);
509 if (!renderer
->fs_ycbcr
)
512 renderer
->fs_ycbcr_sub
= create_ycbcr_frag_shader(renderer
, scale
, true, fs_callback
, callback_priv
);
513 if (!renderer
->fs_ycbcr_sub
)
514 goto error_fs_ycbcr_sub
;
519 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
522 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
525 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
528 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
531 cleanup_pipe_state(renderer
);
538 vl_mc_cleanup(struct vl_mc
*renderer
)
542 cleanup_pipe_state(renderer
);
544 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
545 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
546 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
547 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
548 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
552 vl_mc_init_buffer(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
)
554 assert(renderer
&& buffer
);
556 buffer
->viewport
.scale
[2] = 1;
557 buffer
->viewport
.scale
[3] = 1;
558 buffer
->viewport
.translate
[0] = 0;
559 buffer
->viewport
.translate
[1] = 0;
560 buffer
->viewport
.translate
[2] = 0;
561 buffer
->viewport
.translate
[3] = 0;
563 buffer
->fb_state
.nr_cbufs
= 1;
564 buffer
->fb_state
.zsbuf
= NULL
;
570 vl_mc_cleanup_buffer(struct vl_mc_buffer
*buffer
)
576 vl_mc_set_surface(struct vl_mc_buffer
*buffer
, struct pipe_surface
*surface
)
578 assert(buffer
&& surface
);
580 buffer
->surface_cleared
= false;
582 buffer
->viewport
.scale
[0] = surface
->width
;
583 buffer
->viewport
.scale
[1] = surface
->height
;
585 buffer
->fb_state
.width
= surface
->width
;
586 buffer
->fb_state
.height
= surface
->height
;
587 buffer
->fb_state
.cbufs
[0] = surface
;
591 prepare_pipe_4_rendering(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, unsigned mask
)
595 renderer
->pipe
->bind_rasterizer_state(renderer
->pipe
, renderer
->rs_state
);
597 if (buffer
->surface_cleared
)
598 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_add
[mask
]);
600 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_clear
[mask
]);
602 renderer
->pipe
->set_framebuffer_state(renderer
->pipe
, &buffer
->fb_state
);
603 renderer
->pipe
->set_viewport_state(renderer
->pipe
, &buffer
->viewport
);
607 vl_mc_render_ref(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, struct pipe_sampler_view
*ref
)
609 assert(buffer
&& ref
);
611 prepare_pipe_4_rendering(renderer
, buffer
, PIPE_MASK_R
| PIPE_MASK_G
| PIPE_MASK_B
);
613 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ref
);
614 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ref
);
616 renderer
->pipe
->set_fragment_sampler_views(renderer
->pipe
, 1, &ref
);
617 renderer
->pipe
->bind_fragment_sampler_states(renderer
->pipe
, 1, &renderer
->sampler_ref
);
619 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0,
620 renderer
->buffer_width
/ VL_MACROBLOCK_WIDTH
*
621 renderer
->buffer_height
/ VL_MACROBLOCK_HEIGHT
);
623 buffer
->surface_cleared
= true;
627 vl_mc_render_ycbcr(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, unsigned component
, unsigned num_instances
)
629 unsigned mask
= 1 << component
;
633 if (num_instances
== 0)
636 prepare_pipe_4_rendering(renderer
, buffer
, mask
);
638 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
639 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
641 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);
643 if (buffer
->surface_cleared
) {
644 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_sub
[mask
]);
645 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
646 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);