1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "pipe/p_context.h"
32 #include "util/u_sampler.h"
33 #include "util/u_draw.h"
35 #include "tgsi/tgsi_ureg.h"
37 #include "vl_defines.h"
38 #include "vl_vertex_buffers.h"
/*
 * Vertex shader output slots.
 *
 * The reference shaders use the two generic outputs as top/bottom field
 * motion-vector coordinates (VTOP/VBOTTOM); the YCbCr shaders reuse the
 * very same slots as FLAGS/VTEX, hence the aliasing enumerators.
 *
 * NOTE(review): the enum header and the first enumerators were lost in
 * extraction; reconstructed — confirm ordering against upstream vl_mc.c.
 */
enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_VTOP,
   VS_O_VBOTTOM,

   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};
52 static struct ureg_dst
53 calc_position(struct vl_mc
*r
, struct ureg_program
*shader
, struct ureg_src block_scale
)
55 struct ureg_src vrect
, vpos
;
56 struct ureg_dst t_vpos
;
57 struct ureg_dst o_vpos
;
59 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
60 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
62 t_vpos
= ureg_DECL_temporary(shader
);
64 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
67 * block_scale = (VL_MACROBLOCK_WIDTH, VL_MACROBLOCK_HEIGHT) / (dst.width, dst.height)
69 * t_vpos = (vpos + vrect) * block_scale
73 ureg_ADD(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), vpos
, vrect
);
74 ureg_MUL(shader
, ureg_writemask(t_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
), block_scale
);
75 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_XY
), ureg_src(t_vpos
));
76 ureg_MOV(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_ZW
), ureg_imm1f(shader
, 1.0f
));
81 static struct ureg_dst
82 calc_line(struct pipe_screen
*screen
, struct ureg_program
*shader
)
87 tmp
= ureg_DECL_temporary(shader
);
89 if (screen
->get_param(screen
, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL
))
90 pos
= ureg_DECL_system_value(shader
, TGSI_SEMANTIC_POSITION
, 0);
92 pos
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
,
93 TGSI_INTERPOLATE_LINEAR
);
96 * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
98 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), pos
, ureg_imm1f(shader
, 0.5f
));
99 ureg_FRC(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
));
100 ureg_SGE(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
), ureg_src(tmp
), ureg_imm1f(shader
, 0.5f
));
106 create_ref_vert_shader(struct vl_mc
*r
)
108 struct ureg_program
*shader
;
109 struct ureg_src mv_scale
;
110 struct ureg_src vmv
[2];
111 struct ureg_dst t_vpos
;
112 struct ureg_dst o_vmv
[2];
115 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
119 vmv
[0] = ureg_DECL_vs_input(shader
, VS_I_MV_TOP
);
120 vmv
[1] = ureg_DECL_vs_input(shader
, VS_I_MV_BOTTOM
);
122 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
,
123 (float)VL_MACROBLOCK_WIDTH
/ r
->buffer_width
,
124 (float)VL_MACROBLOCK_HEIGHT
/ r
->buffer_height
)
127 o_vmv
[0] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
128 o_vmv
[1] = ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
131 * mv_scale.xy = 0.5 / (dst.width, dst.height);
132 * mv_scale.z = 1.0f / 4.0f
133 * mv_scale.w = 1.0f / 255.0f
135 * // Apply motion vectors
136 * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
137 * o_vmv[0..1].zw = vmv[0..1] * mv_scale
141 mv_scale
= ureg_imm4f(shader
,
142 0.5f
/ r
->buffer_width
,
143 0.5f
/ r
->buffer_height
,
145 1.0f
/ PIPE_VIDEO_MV_WEIGHT_MAX
);
147 for (i
= 0; i
< 2; ++i
) {
148 ureg_MAD(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_XY
), mv_scale
, vmv
[i
], ureg_src(t_vpos
));
149 ureg_MUL(shader
, ureg_writemask(o_vmv
[i
], TGSI_WRITEMASK_ZW
), mv_scale
, vmv
[i
]);
152 ureg_release_temporary(shader
, t_vpos
);
156 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
160 create_ref_frag_shader(struct vl_mc
*r
)
162 const float y_scale
=
163 r
->buffer_height
/ 2 *
164 r
->macroblock_size
/ VL_MACROBLOCK_HEIGHT
;
166 struct ureg_program
*shader
;
167 struct ureg_src tc
[2], sampler
;
168 struct ureg_dst ref
, field
;
169 struct ureg_dst fragment
;
172 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
176 tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
177 tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
179 sampler
= ureg_DECL_sampler(shader
, 0);
180 ref
= ureg_DECL_temporary(shader
);
182 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
184 field
= calc_line(r
->pipe
->screen
, shader
);
187 * ref = field.z ? tc[1] : tc[0]
189 * // Adjust tc acording to top/bottom field selection
192 * ref.y = floor(ref.y)
196 * fragment.xyz = tex(ref, sampler[0])
198 ureg_CMP(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_XYZ
),
199 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
201 ureg_CMP(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
),
202 ureg_negate(ureg_scalar(ureg_src(field
), TGSI_SWIZZLE_Y
)),
205 ureg_IF(shader
, ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
), &label
);
207 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
208 ureg_src(ref
), ureg_imm1f(shader
, y_scale
));
209 ureg_FLR(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
), ureg_src(ref
));
210 ureg_ADD(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
211 ureg_src(ref
), ureg_scalar(ureg_src(ref
), TGSI_SWIZZLE_Z
));
212 ureg_MUL(shader
, ureg_writemask(ref
, TGSI_WRITEMASK_Y
),
213 ureg_src(ref
), ureg_imm1f(shader
, 1.0f
/ y_scale
));
215 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
218 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), TGSI_TEXTURE_2D
, ureg_src(ref
), sampler
);
220 ureg_release_temporary(shader
, ref
);
222 ureg_release_temporary(shader
, field
);
225 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
229 create_ycbcr_vert_shader(struct vl_mc
*r
, vl_mc_ycbcr_vert_shader vs_callback
, void *callback_priv
)
231 struct ureg_program
*shader
;
233 struct ureg_src vrect
, vpos
;
234 struct ureg_dst t_vpos
, t_vtex
;
235 struct ureg_dst o_vpos
, o_flags
;
237 struct vertex2f scale
= {
238 (float)VL_BLOCK_WIDTH
/ r
->buffer_width
* VL_MACROBLOCK_WIDTH
/ r
->macroblock_size
,
239 (float)VL_BLOCK_HEIGHT
/ r
->buffer_height
* VL_MACROBLOCK_HEIGHT
/ r
->macroblock_size
244 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
248 vrect
= ureg_DECL_vs_input(shader
, VS_I_RECT
);
249 vpos
= ureg_DECL_vs_input(shader
, VS_I_VPOS
);
251 t_vpos
= calc_position(r
, shader
, ureg_imm2f(shader
, scale
.x
, scale
.y
));
252 t_vtex
= ureg_DECL_temporary(shader
);
254 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
255 o_flags
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
);
259 * o_flags.z = intra * 0.5
262 * t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y : 0 }
263 * t_vtex.z = vpos.y % 2
264 * t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
265 * o_vpos.y = t_vtex.y + t_vpos.y
267 * o_flags.w = t_vtex.z ? 0 : 1
272 vs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, t_vpos
);
274 ureg_MUL(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_Z
),
275 ureg_scalar(vpos
, TGSI_SWIZZLE_Z
), ureg_imm1f(shader
, 0.5f
));
276 ureg_MOV(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, -1.0f
));
278 if (r
->macroblock_size
== VL_MACROBLOCK_HEIGHT
) { //TODO
279 ureg_IF(shader
, ureg_scalar(vpos
, TGSI_SWIZZLE_W
), &label
);
281 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_XY
),
282 ureg_negate(ureg_scalar(vrect
, TGSI_SWIZZLE_Y
)),
283 ureg_imm2f(shader
, 0.0f
, scale
.y
),
284 ureg_imm2f(shader
, -scale
.y
, 0.0f
));
285 ureg_MUL(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
),
286 ureg_scalar(vpos
, TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.5f
));
288 ureg_FRC(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Z
), ureg_src(t_vtex
));
290 ureg_CMP(shader
, ureg_writemask(t_vtex
, TGSI_WRITEMASK_Y
),
291 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
292 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_X
),
293 ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Y
));
294 ureg_ADD(shader
, ureg_writemask(o_vpos
, TGSI_WRITEMASK_Y
),
295 ureg_src(t_vpos
), ureg_src(t_vtex
));
297 ureg_CMP(shader
, ureg_writemask(o_flags
, TGSI_WRITEMASK_W
),
298 ureg_negate(ureg_scalar(ureg_src(t_vtex
), TGSI_SWIZZLE_Z
)),
299 ureg_imm1f(shader
, 0.0f
), ureg_imm1f(shader
, 1.0f
));
301 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
305 ureg_release_temporary(shader
, t_vtex
);
306 ureg_release_temporary(shader
, t_vpos
);
310 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
314 create_ycbcr_frag_shader(struct vl_mc
*r
, float scale
, bool invert
,
315 vl_mc_ycbcr_frag_shader fs_callback
, void *callback_priv
)
317 struct ureg_program
*shader
;
318 struct ureg_src flags
;
320 struct ureg_dst fragment
;
323 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
327 flags
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_FLAGS
, TGSI_INTERPOLATE_LINEAR
);
329 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
331 tmp
= calc_line(r
->pipe
->screen
, shader
);
337 * fragment.xyz = tex(tc, sampler) * scale + tc.z
342 ureg_SEQ(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
343 ureg_scalar(flags
, TGSI_SWIZZLE_W
), ureg_src(tmp
));
345 ureg_IF(shader
, ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), &label
);
349 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
350 ureg_ELSE(shader
, &label
);
352 fs_callback(callback_priv
, r
, shader
, VS_O_VTEX
, tmp
);
355 ureg_MAD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
356 ureg_src(tmp
), ureg_imm1f(shader
, scale
),
357 ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
359 ureg_ADD(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_XYZ
),
360 ureg_src(tmp
), ureg_scalar(flags
, TGSI_SWIZZLE_Z
));
362 ureg_MUL(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
), ureg_src(tmp
), ureg_imm1f(shader
, invert
? -1.0f
: 1.0f
));
363 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
365 ureg_fixup_label(shader
, label
, ureg_get_instruction_number(shader
));
368 ureg_release_temporary(shader
, tmp
);
372 return ureg_create_shader_and_destroy(shader
, r
->pipe
);
376 init_pipe_state(struct vl_mc
*r
)
378 struct pipe_sampler_state sampler
;
379 struct pipe_blend_state blend
;
380 struct pipe_rasterizer_state rs_state
;
385 memset(&sampler
, 0, sizeof(sampler
));
386 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
387 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
388 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_BORDER
;
389 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
390 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
391 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
392 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
393 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
394 sampler
.normalized_coords
= 1;
395 r
->sampler_ref
= r
->pipe
->create_sampler_state(r
->pipe
, &sampler
);
397 goto error_sampler_ref
;
399 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
400 memset(&blend
, 0, sizeof blend
);
401 blend
.independent_blend_enable
= 0;
402 blend
.rt
[0].blend_enable
= 1;
403 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
404 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
405 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
406 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
407 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
408 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ZERO
;
409 blend
.logicop_enable
= 0;
410 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
411 blend
.rt
[0].colormask
= i
;
413 r
->blend_clear
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
414 if (!r
->blend_clear
[i
])
417 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
418 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
419 r
->blend_add
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
420 if (!r
->blend_add
[i
])
423 blend
.rt
[0].rgb_func
= PIPE_BLEND_REVERSE_SUBTRACT
;
424 blend
.rt
[0].alpha_dst_factor
= PIPE_BLEND_REVERSE_SUBTRACT
;
425 r
->blend_sub
[i
] = r
->pipe
->create_blend_state(r
->pipe
, &blend
);
426 if (!r
->blend_sub
[i
])
430 memset(&rs_state
, 0, sizeof(rs_state
));
431 /*rs_state.sprite_coord_enable */
432 rs_state
.sprite_coord_mode
= PIPE_SPRITE_COORD_UPPER_LEFT
;
433 rs_state
.point_quad_rasterization
= true;
434 rs_state
.point_size
= VL_BLOCK_WIDTH
;
435 rs_state
.half_pixel_center
= true;
436 rs_state
.bottom_edge_rule
= true;
437 rs_state
.depth_clip
= 1;
438 r
->rs_state
= r
->pipe
->create_rasterizer_state(r
->pipe
, &rs_state
);
446 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
448 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
451 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
453 if (r
->blend_clear
[i
])
454 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
457 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
464 cleanup_pipe_state(struct vl_mc
*r
)
470 r
->pipe
->delete_sampler_state(r
->pipe
, r
->sampler_ref
);
471 for (i
= 0; i
< VL_MC_NUM_BLENDERS
; ++i
) {
472 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_clear
[i
]);
473 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_add
[i
]);
474 r
->pipe
->delete_blend_state(r
->pipe
, r
->blend_sub
[i
]);
476 r
->pipe
->delete_rasterizer_state(r
->pipe
, r
->rs_state
);
480 vl_mc_init(struct vl_mc
*renderer
, struct pipe_context
*pipe
,
481 unsigned buffer_width
, unsigned buffer_height
,
482 unsigned macroblock_size
, float scale
,
483 vl_mc_ycbcr_vert_shader vs_callback
,
484 vl_mc_ycbcr_frag_shader fs_callback
,
490 memset(renderer
, 0, sizeof(struct vl_mc
));
492 renderer
->pipe
= pipe
;
493 renderer
->buffer_width
= buffer_width
;
494 renderer
->buffer_height
= buffer_height
;
495 renderer
->macroblock_size
= macroblock_size
;
497 if (!init_pipe_state(renderer
))
498 goto error_pipe_state
;
500 renderer
->vs_ref
= create_ref_vert_shader(renderer
);
501 if (!renderer
->vs_ref
)
504 renderer
->vs_ycbcr
= create_ycbcr_vert_shader(renderer
, vs_callback
, callback_priv
);
505 if (!renderer
->vs_ycbcr
)
508 renderer
->fs_ref
= create_ref_frag_shader(renderer
);
509 if (!renderer
->fs_ref
)
512 renderer
->fs_ycbcr
= create_ycbcr_frag_shader(renderer
, scale
, false, fs_callback
, callback_priv
);
513 if (!renderer
->fs_ycbcr
)
516 renderer
->fs_ycbcr_sub
= create_ycbcr_frag_shader(renderer
, scale
, true, fs_callback
, callback_priv
);
517 if (!renderer
->fs_ycbcr_sub
)
518 goto error_fs_ycbcr_sub
;
523 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
526 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
529 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
532 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
535 cleanup_pipe_state(renderer
);
542 vl_mc_cleanup(struct vl_mc
*renderer
)
546 cleanup_pipe_state(renderer
);
548 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ref
);
549 renderer
->pipe
->delete_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
550 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ref
);
551 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
552 renderer
->pipe
->delete_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
556 vl_mc_init_buffer(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
)
558 assert(renderer
&& buffer
);
560 buffer
->viewport
.scale
[2] = 1;
561 buffer
->viewport
.translate
[0] = 0;
562 buffer
->viewport
.translate
[1] = 0;
563 buffer
->viewport
.translate
[2] = 0;
565 buffer
->fb_state
.nr_cbufs
= 1;
566 buffer
->fb_state
.zsbuf
= NULL
;
/*
 * Release per-buffer resources. Currently nothing to free; kept for
 * interface symmetry with vl_mc_init_buffer().
 * NOTE(review): body reconstructed as assert-only — confirm upstream.
 */
void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}
578 vl_mc_set_surface(struct vl_mc_buffer
*buffer
, struct pipe_surface
*surface
)
580 assert(buffer
&& surface
);
582 buffer
->surface_cleared
= false;
584 buffer
->viewport
.scale
[0] = surface
->width
;
585 buffer
->viewport
.scale
[1] = surface
->height
;
587 buffer
->fb_state
.width
= surface
->width
;
588 buffer
->fb_state
.height
= surface
->height
;
589 buffer
->fb_state
.cbufs
[0] = surface
;
593 prepare_pipe_4_rendering(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, unsigned mask
)
597 renderer
->pipe
->bind_rasterizer_state(renderer
->pipe
, renderer
->rs_state
);
599 if (buffer
->surface_cleared
)
600 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_add
[mask
]);
602 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_clear
[mask
]);
604 renderer
->pipe
->set_framebuffer_state(renderer
->pipe
, &buffer
->fb_state
);
605 renderer
->pipe
->set_viewport_states(renderer
->pipe
, 0, 1, &buffer
->viewport
);
609 vl_mc_render_ref(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, struct pipe_sampler_view
*ref
)
611 assert(buffer
&& ref
);
613 prepare_pipe_4_rendering(renderer
, buffer
, PIPE_MASK_R
| PIPE_MASK_G
| PIPE_MASK_B
);
615 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ref
);
616 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ref
);
618 renderer
->pipe
->set_sampler_views(renderer
->pipe
, PIPE_SHADER_FRAGMENT
,
620 renderer
->pipe
->bind_sampler_states(renderer
->pipe
, PIPE_SHADER_FRAGMENT
,
621 0, 1, &renderer
->sampler_ref
);
623 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0,
624 renderer
->buffer_width
/ VL_MACROBLOCK_WIDTH
*
625 renderer
->buffer_height
/ VL_MACROBLOCK_HEIGHT
);
627 buffer
->surface_cleared
= true;
631 vl_mc_render_ycbcr(struct vl_mc
*renderer
, struct vl_mc_buffer
*buffer
, unsigned component
, unsigned num_instances
)
633 unsigned mask
= 1 << component
;
637 if (num_instances
== 0)
640 prepare_pipe_4_rendering(renderer
, buffer
, mask
);
642 renderer
->pipe
->bind_vs_state(renderer
->pipe
, renderer
->vs_ycbcr
);
643 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr
);
645 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);
647 if (buffer
->surface_cleared
) {
648 renderer
->pipe
->bind_blend_state(renderer
->pipe
, renderer
->blend_sub
[mask
]);
649 renderer
->pipe
->bind_fs_state(renderer
->pipe
, renderer
->fs_ycbcr_sub
);
650 util_draw_arrays_instanced(renderer
->pipe
, PIPE_PRIM_QUADS
, 0, 4, 0, num_instances
);