1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "pipe/p_compiler.h"
31 #include "pipe/p_context.h"
33 #include "util/u_memory.h"
34 #include "util/u_draw.h"
35 #include "util/u_surface.h"
36 #include "util/u_upload_mgr.h"
38 #include "tgsi/tgsi_ureg.h"
42 #include "vl_compositor.h"
45 #define MAX_DIRTY (1 << 15)
57 create_vert_shader(struct vl_compositor
*c
)
59 struct ureg_program
*shader
;
60 struct ureg_src vpos
, vtex
, color
;
62 struct ureg_dst o_vpos
, o_vtex
, o_color
;
63 struct ureg_dst o_vtop
, o_vbottom
;
65 shader
= ureg_create(PIPE_SHADER_VERTEX
);
69 vpos
= ureg_DECL_vs_input(shader
, 0);
70 vtex
= ureg_DECL_vs_input(shader
, 1);
71 color
= ureg_DECL_vs_input(shader
, 2);
72 tmp
= ureg_DECL_temporary(shader
);
73 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
74 o_color
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, VS_O_COLOR
);
75 o_vtex
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
);
76 o_vtop
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
77 o_vbottom
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
84 ureg_MOV(shader
, o_vpos
, vpos
);
85 ureg_MOV(shader
, o_vtex
, vtex
);
86 ureg_MOV(shader
, o_color
, color
);
93 * o_vtop.y = vtex.y * tmp.x + 0.25f
94 * o_vtop.z = vtex.y * tmp.y + 0.25f
95 * o_vtop.w = 1 / tmp.x
97 * o_vbottom.x = vtex.x
98 * o_vbottom.y = vtex.y * tmp.x - 0.25f
99 * o_vbottom.z = vtex.y * tmp.y - 0.25f
100 * o_vbottom.w = 1 / tmp.y
102 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_X
),
103 ureg_scalar(vtex
, TGSI_SWIZZLE_W
), ureg_imm1f(shader
, 0.5f
));
104 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
105 ureg_scalar(vtex
, TGSI_SWIZZLE_W
), ureg_imm1f(shader
, 0.25f
));
107 ureg_MOV(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_X
), vtex
);
108 ureg_MAD(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_Y
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
109 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
), ureg_imm1f(shader
, 0.25f
));
110 ureg_MAD(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_Z
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
111 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.25f
));
112 ureg_RCP(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_W
),
113 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
));
115 ureg_MOV(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_X
), vtex
);
116 ureg_MAD(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_Y
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
117 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
), ureg_imm1f(shader
, -0.25f
));
118 ureg_MAD(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_Z
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
119 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, -0.25f
));
120 ureg_RCP(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_W
),
121 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
));
125 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
129 create_frag_shader_weave(struct ureg_program
*shader
, struct ureg_dst fragment
)
131 struct ureg_src i_tc
[2];
132 struct ureg_src sampler
[3];
133 struct ureg_dst t_tc
[2];
134 struct ureg_dst t_texel
[2];
137 i_tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
138 i_tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
140 for (i
= 0; i
< 3; ++i
)
141 sampler
[i
] = ureg_DECL_sampler(shader
, i
);
143 for (i
= 0; i
< 2; ++i
) {
144 t_tc
[i
] = ureg_DECL_temporary(shader
);
145 t_texel
[i
] = ureg_DECL_temporary(shader
);
148 /* calculate the texture offsets
150 * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
152 for (i
= 0; i
< 2; ++i
) {
153 ureg_MOV(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_X
), i_tc
[i
]);
154 ureg_ADD(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_YZ
),
155 i_tc
[i
], ureg_imm1f(shader
, -0.5f
));
156 ureg_ROUND(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_YZ
), ureg_src(t_tc
[i
]));
157 ureg_MOV(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_W
),
158 ureg_imm1f(shader
, i
? 1.0f
: 0.0f
));
159 ureg_ADD(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_YZ
),
160 ureg_src(t_tc
[i
]), ureg_imm1f(shader
, 0.5f
));
161 ureg_MUL(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_Y
),
162 ureg_src(t_tc
[i
]), ureg_scalar(i_tc
[0], TGSI_SWIZZLE_W
));
163 ureg_MUL(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_Z
),
164 ureg_src(t_tc
[i
]), ureg_scalar(i_tc
[1], TGSI_SWIZZLE_W
));
168 * texel[0..1].x = tex(t_tc[0..1][0])
169 * texel[0..1].y = tex(t_tc[0..1][1])
170 * texel[0..1].z = tex(t_tc[0..1][2])
172 for (i
= 0; i
< 2; ++i
)
173 for (j
= 0; j
< 3; ++j
) {
174 struct ureg_src src
= ureg_swizzle(ureg_src(t_tc
[i
]),
175 TGSI_SWIZZLE_X
, j
? TGSI_SWIZZLE_Z
: TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
);
177 ureg_TEX(shader
, ureg_writemask(t_texel
[i
], TGSI_WRITEMASK_X
<< j
),
178 TGSI_TEXTURE_2D_ARRAY
, src
, sampler
[j
]);
181 /* calculate linear interpolation factor
182 * factor = |round(i_tc.y) - i_tc.y| * 2
184 ureg_ROUND(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_YZ
), i_tc
[0]);
185 ureg_ADD(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_YZ
),
186 ureg_src(t_tc
[0]), ureg_negate(i_tc
[0]));
187 ureg_MUL(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_YZ
),
188 ureg_abs(ureg_src(t_tc
[0])), ureg_imm1f(shader
, 2.0f
));
189 ureg_LRP(shader
, fragment
, ureg_swizzle(ureg_src(t_tc
[0]),
190 TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Z
),
191 ureg_src(t_texel
[0]), ureg_src(t_texel
[1]));
193 for (i
= 0; i
< 2; ++i
) {
194 ureg_release_temporary(shader
, t_texel
[i
]);
195 ureg_release_temporary(shader
, t_tc
[i
]);
200 create_frag_shader_csc(struct ureg_program
*shader
, struct ureg_dst texel
,
201 struct ureg_dst fragment
)
203 struct ureg_src csc
[3];
204 struct ureg_src lumakey
;
205 struct ureg_dst temp
[2];
208 for (i
= 0; i
< 3; ++i
)
209 csc
[i
] = ureg_DECL_constant(shader
, i
);
211 lumakey
= ureg_DECL_constant(shader
, 3);
213 for (i
= 0; i
< 2; ++i
)
214 temp
[i
] = ureg_DECL_temporary(shader
);
216 ureg_MOV(shader
, ureg_writemask(texel
, TGSI_WRITEMASK_W
),
217 ureg_imm1f(shader
, 1.0f
));
219 for (i
= 0; i
< 3; ++i
)
220 ureg_DP4(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
],
223 ureg_MOV(shader
, ureg_writemask(temp
[0], TGSI_WRITEMASK_W
),
224 ureg_scalar(ureg_src(texel
), TGSI_SWIZZLE_Z
));
225 ureg_SLE(shader
, ureg_writemask(temp
[1],TGSI_WRITEMASK_W
),
226 ureg_src(temp
[0]), ureg_scalar(lumakey
, TGSI_SWIZZLE_X
));
227 ureg_SGT(shader
, ureg_writemask(temp
[0],TGSI_WRITEMASK_W
),
228 ureg_src(temp
[0]), ureg_scalar(lumakey
, TGSI_SWIZZLE_Y
));
229 ureg_MAX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
),
230 ureg_src(temp
[0]), ureg_src(temp
[1]));
232 for (i
= 0; i
< 2; ++i
)
233 ureg_release_temporary(shader
, temp
[i
]);
237 create_frag_shader_video_buffer(struct vl_compositor
*c
)
239 struct ureg_program
*shader
;
241 struct ureg_src sampler
[3];
242 struct ureg_dst texel
;
243 struct ureg_dst fragment
;
246 shader
= ureg_create(PIPE_SHADER_FRAGMENT
);
250 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
, TGSI_INTERPOLATE_LINEAR
);
251 for (i
= 0; i
< 3; ++i
)
252 sampler
[i
] = ureg_DECL_sampler(shader
, i
);
254 texel
= ureg_DECL_temporary(shader
);
255 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
258 * texel.xyz = tex(tc, sampler[i])
259 * fragment = csc * texel
261 for (i
= 0; i
< 3; ++i
)
262 ureg_TEX(shader
, ureg_writemask(texel
, TGSI_WRITEMASK_X
<< i
), TGSI_TEXTURE_2D_ARRAY
, tc
, sampler
[i
]);
264 create_frag_shader_csc(shader
, texel
, fragment
);
266 ureg_release_temporary(shader
, texel
);
269 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
273 create_frag_shader_weave_rgb(struct vl_compositor
*c
)
275 struct ureg_program
*shader
;
276 struct ureg_dst texel
, fragment
;
278 shader
= ureg_create(PIPE_SHADER_FRAGMENT
);
282 texel
= ureg_DECL_temporary(shader
);
283 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
285 create_frag_shader_weave(shader
, texel
);
286 create_frag_shader_csc(shader
, texel
, fragment
);
288 ureg_release_temporary(shader
, texel
);
292 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
296 create_frag_shader_weave_yuv(struct vl_compositor
*c
, bool y
)
298 struct ureg_program
*shader
;
299 struct ureg_dst texel
, fragment
;
301 shader
= ureg_create(PIPE_SHADER_FRAGMENT
);
305 texel
= ureg_DECL_temporary(shader
);
306 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
308 create_frag_shader_weave(shader
, texel
);
311 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
), ureg_src(texel
));
313 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XY
),
314 ureg_swizzle(ureg_src(texel
), TGSI_SWIZZLE_Y
,
315 TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
));
317 ureg_release_temporary(shader
, texel
);
321 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
325 create_frag_shader_palette(struct vl_compositor
*c
, bool include_cc
)
327 struct ureg_program
*shader
;
328 struct ureg_src csc
[3];
330 struct ureg_src sampler
;
331 struct ureg_src palette
;
332 struct ureg_dst texel
;
333 struct ureg_dst fragment
;
336 shader
= ureg_create(PIPE_SHADER_FRAGMENT
);
340 for (i
= 0; include_cc
&& i
< 3; ++i
)
341 csc
[i
] = ureg_DECL_constant(shader
, i
);
343 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
, TGSI_INTERPOLATE_LINEAR
);
344 sampler
= ureg_DECL_sampler(shader
, 0);
345 palette
= ureg_DECL_sampler(shader
, 1);
347 texel
= ureg_DECL_temporary(shader
);
348 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
351 * texel = tex(tc, sampler)
352 * fragment.xyz = tex(texel, palette) * csc
353 * fragment.a = texel.a
355 ureg_TEX(shader
, texel
, TGSI_TEXTURE_2D
, tc
, sampler
);
356 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_src(texel
));
359 ureg_TEX(shader
, texel
, TGSI_TEXTURE_1D
, ureg_src(texel
), palette
);
360 for (i
= 0; i
< 3; ++i
)
361 ureg_DP4(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
], ureg_src(texel
));
363 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
),
364 TGSI_TEXTURE_1D
, ureg_src(texel
), palette
);
367 ureg_release_temporary(shader
, texel
);
370 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
374 create_frag_shader_rgba(struct vl_compositor
*c
)
376 struct ureg_program
*shader
;
377 struct ureg_src tc
, color
, sampler
;
378 struct ureg_dst texel
, fragment
;
380 shader
= ureg_create(PIPE_SHADER_FRAGMENT
);
384 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
, TGSI_INTERPOLATE_LINEAR
);
385 color
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_COLOR
, VS_O_COLOR
, TGSI_INTERPOLATE_LINEAR
);
386 sampler
= ureg_DECL_sampler(shader
, 0);
387 texel
= ureg_DECL_temporary(shader
);
388 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
391 * fragment = tex(tc, sampler)
393 ureg_TEX(shader
, texel
, TGSI_TEXTURE_2D
, tc
, sampler
);
394 ureg_MUL(shader
, fragment
, ureg_src(texel
), color
);
397 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
401 init_shaders(struct vl_compositor
*c
)
405 c
->vs
= create_vert_shader(c
);
407 debug_printf("Unable to create vertex shader.\n");
411 c
->fs_video_buffer
= create_frag_shader_video_buffer(c
);
412 if (!c
->fs_video_buffer
) {
413 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
417 c
->fs_weave_rgb
= create_frag_shader_weave_rgb(c
);
418 if (!c
->fs_weave_rgb
) {
419 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
423 c
->fs_weave_yuv
.y
= create_frag_shader_weave_yuv(c
, true);
424 c
->fs_weave_yuv
.uv
= create_frag_shader_weave_yuv(c
, false);
425 if (!c
->fs_weave_yuv
.y
|| !c
->fs_weave_yuv
.uv
) {
426 debug_printf("Unable to create YCbCr i-to-YCbCr p weave fragment shader.\n");
430 c
->fs_palette
.yuv
= create_frag_shader_palette(c
, true);
431 if (!c
->fs_palette
.yuv
) {
432 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
436 c
->fs_palette
.rgb
= create_frag_shader_palette(c
, false);
437 if (!c
->fs_palette
.rgb
) {
438 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
442 c
->fs_rgba
= create_frag_shader_rgba(c
);
444 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
451 static void cleanup_shaders(struct vl_compositor
*c
)
455 c
->pipe
->delete_vs_state(c
->pipe
, c
->vs
);
456 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_video_buffer
);
457 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_weave_rgb
);
458 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_weave_yuv
.y
);
459 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_weave_yuv
.uv
);
460 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_palette
.yuv
);
461 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_palette
.rgb
);
462 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_rgba
);
466 init_pipe_state(struct vl_compositor
*c
)
468 struct pipe_rasterizer_state rast
;
469 struct pipe_sampler_state sampler
;
470 struct pipe_blend_state blend
;
471 struct pipe_depth_stencil_alpha_state dsa
;
476 c
->fb_state
.nr_cbufs
= 1;
477 c
->fb_state
.zsbuf
= NULL
;
479 memset(&sampler
, 0, sizeof(sampler
));
480 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
481 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
482 sampler
.wrap_r
= PIPE_TEX_WRAP_REPEAT
;
483 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
484 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
485 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
486 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
487 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
488 sampler
.normalized_coords
= 1;
490 c
->sampler_linear
= c
->pipe
->create_sampler_state(c
->pipe
, &sampler
);
492 sampler
.min_img_filter
= PIPE_TEX_FILTER_NEAREST
;
493 sampler
.mag_img_filter
= PIPE_TEX_FILTER_NEAREST
;
494 c
->sampler_nearest
= c
->pipe
->create_sampler_state(c
->pipe
, &sampler
);
496 memset(&blend
, 0, sizeof blend
);
497 blend
.independent_blend_enable
= 0;
498 blend
.rt
[0].blend_enable
= 0;
499 blend
.logicop_enable
= 0;
500 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
501 blend
.rt
[0].colormask
= PIPE_MASK_RGBA
;
503 c
->blend_clear
= c
->pipe
->create_blend_state(c
->pipe
, &blend
);
505 blend
.rt
[0].blend_enable
= 1;
506 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
507 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
508 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_INV_SRC_ALPHA
;
509 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
510 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_ONE
;
511 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
512 c
->blend_add
= c
->pipe
->create_blend_state(c
->pipe
, &blend
);
514 memset(&rast
, 0, sizeof rast
);
517 rast
.cull_face
= PIPE_FACE_NONE
;
518 rast
.fill_back
= PIPE_POLYGON_MODE_FILL
;
519 rast
.fill_front
= PIPE_POLYGON_MODE_FILL
;
522 rast
.point_size_per_vertex
= 1;
523 rast
.offset_units
= 1;
524 rast
.offset_scale
= 1;
525 rast
.half_pixel_center
= 1;
526 rast
.bottom_edge_rule
= 1;
529 c
->rast
= c
->pipe
->create_rasterizer_state(c
->pipe
, &rast
);
531 memset(&dsa
, 0, sizeof dsa
);
532 dsa
.depth
.enabled
= 0;
533 dsa
.depth
.writemask
= 0;
534 dsa
.depth
.func
= PIPE_FUNC_ALWAYS
;
535 for (i
= 0; i
< 2; ++i
) {
536 dsa
.stencil
[i
].enabled
= 0;
537 dsa
.stencil
[i
].func
= PIPE_FUNC_ALWAYS
;
538 dsa
.stencil
[i
].fail_op
= PIPE_STENCIL_OP_KEEP
;
539 dsa
.stencil
[i
].zpass_op
= PIPE_STENCIL_OP_KEEP
;
540 dsa
.stencil
[i
].zfail_op
= PIPE_STENCIL_OP_KEEP
;
541 dsa
.stencil
[i
].valuemask
= 0;
542 dsa
.stencil
[i
].writemask
= 0;
544 dsa
.alpha
.enabled
= 0;
545 dsa
.alpha
.func
= PIPE_FUNC_ALWAYS
;
546 dsa
.alpha
.ref_value
= 0;
547 c
->dsa
= c
->pipe
->create_depth_stencil_alpha_state(c
->pipe
, &dsa
);
548 c
->pipe
->bind_depth_stencil_alpha_state(c
->pipe
, c
->dsa
);
553 static void cleanup_pipe_state(struct vl_compositor
*c
)
557 /* Asserted in softpipe_delete_fs_state() for some reason */
558 c
->pipe
->bind_vs_state(c
->pipe
, NULL
);
559 c
->pipe
->bind_fs_state(c
->pipe
, NULL
);
561 c
->pipe
->delete_depth_stencil_alpha_state(c
->pipe
, c
->dsa
);
562 c
->pipe
->delete_sampler_state(c
->pipe
, c
->sampler_linear
);
563 c
->pipe
->delete_sampler_state(c
->pipe
, c
->sampler_nearest
);
564 c
->pipe
->delete_blend_state(c
->pipe
, c
->blend_clear
);
565 c
->pipe
->delete_blend_state(c
->pipe
, c
->blend_add
);
566 c
->pipe
->delete_rasterizer_state(c
->pipe
, c
->rast
);
570 init_buffers(struct vl_compositor
*c
)
572 struct pipe_vertex_element vertex_elems
[3];
577 * Create our vertex buffer and vertex buffer elements
579 c
->vertex_buf
.stride
= sizeof(struct vertex2f
) + sizeof(struct vertex4f
) * 2;
580 c
->vertex_buf
.buffer_offset
= 0;
581 c
->vertex_buf
.buffer
= NULL
;
583 vertex_elems
[0].src_offset
= 0;
584 vertex_elems
[0].instance_divisor
= 0;
585 vertex_elems
[0].vertex_buffer_index
= 0;
586 vertex_elems
[0].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
587 vertex_elems
[1].src_offset
= sizeof(struct vertex2f
);
588 vertex_elems
[1].instance_divisor
= 0;
589 vertex_elems
[1].vertex_buffer_index
= 0;
590 vertex_elems
[1].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
591 vertex_elems
[2].src_offset
= sizeof(struct vertex2f
) + sizeof(struct vertex4f
);
592 vertex_elems
[2].instance_divisor
= 0;
593 vertex_elems
[2].vertex_buffer_index
= 0;
594 vertex_elems
[2].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
595 c
->vertex_elems_state
= c
->pipe
->create_vertex_elements_state(c
->pipe
, 3, vertex_elems
);
601 cleanup_buffers(struct vl_compositor
*c
)
605 c
->pipe
->delete_vertex_elements_state(c
->pipe
, c
->vertex_elems_state
);
606 pipe_resource_reference(&c
->vertex_buf
.buffer
, NULL
);
609 static inline struct u_rect
610 default_rect(struct vl_compositor_layer
*layer
)
612 struct pipe_resource
*res
= layer
->sampler_views
[0]->texture
;
613 struct u_rect rect
= { 0, res
->width0
, 0, res
->height0
* res
->array_size
};
617 static inline struct vertex2f
618 calc_topleft(struct vertex2f size
, struct u_rect rect
)
620 struct vertex2f res
= { rect
.x0
/ size
.x
, rect
.y0
/ size
.y
};
624 static inline struct vertex2f
625 calc_bottomright(struct vertex2f size
, struct u_rect rect
)
627 struct vertex2f res
= { rect
.x1
/ size
.x
, rect
.y1
/ size
.y
};
632 calc_src_and_dst(struct vl_compositor_layer
*layer
, unsigned width
, unsigned height
,
633 struct u_rect src
, struct u_rect dst
)
635 struct vertex2f size
= { width
, height
};
637 layer
->src
.tl
= calc_topleft(size
, src
);
638 layer
->src
.br
= calc_bottomright(size
, src
);
639 layer
->dst
.tl
= calc_topleft(size
, dst
);
640 layer
->dst
.br
= calc_bottomright(size
, dst
);
642 layer
->zw
.y
= size
.y
;
646 gen_rect_verts(struct vertex2f
*vb
, struct vl_compositor_layer
*layer
)
648 struct vertex2f tl
, tr
, br
, bl
;
652 switch (layer
->rotate
) {
654 case VL_COMPOSITOR_ROTATE_0
:
656 tr
.x
= layer
->dst
.br
.x
;
657 tr
.y
= layer
->dst
.tl
.y
;
659 bl
.x
= layer
->dst
.tl
.x
;
660 bl
.y
= layer
->dst
.br
.y
;
662 case VL_COMPOSITOR_ROTATE_90
:
663 tl
.x
= layer
->dst
.br
.x
;
664 tl
.y
= layer
->dst
.tl
.y
;
666 br
.x
= layer
->dst
.tl
.x
;
667 br
.y
= layer
->dst
.br
.y
;
670 case VL_COMPOSITOR_ROTATE_180
:
672 tr
.x
= layer
->dst
.tl
.x
;
673 tr
.y
= layer
->dst
.br
.y
;
675 bl
.x
= layer
->dst
.br
.x
;
676 bl
.y
= layer
->dst
.tl
.y
;
678 case VL_COMPOSITOR_ROTATE_270
:
679 tl
.x
= layer
->dst
.tl
.x
;
680 tl
.y
= layer
->dst
.br
.y
;
682 br
.x
= layer
->dst
.br
.x
;
683 br
.y
= layer
->dst
.tl
.y
;
690 vb
[ 1].x
= layer
->src
.tl
.x
;
691 vb
[ 1].y
= layer
->src
.tl
.y
;
693 vb
[ 3].x
= layer
->colors
[0].x
;
694 vb
[ 3].y
= layer
->colors
[0].y
;
695 vb
[ 4].x
= layer
->colors
[0].z
;
696 vb
[ 4].y
= layer
->colors
[0].w
;
700 vb
[ 6].x
= layer
->src
.br
.x
;
701 vb
[ 6].y
= layer
->src
.tl
.y
;
703 vb
[ 8].x
= layer
->colors
[1].x
;
704 vb
[ 8].y
= layer
->colors
[1].y
;
705 vb
[ 9].x
= layer
->colors
[1].z
;
706 vb
[ 9].y
= layer
->colors
[1].w
;
710 vb
[11].x
= layer
->src
.br
.x
;
711 vb
[11].y
= layer
->src
.br
.y
;
713 vb
[13].x
= layer
->colors
[2].x
;
714 vb
[13].y
= layer
->colors
[2].y
;
715 vb
[14].x
= layer
->colors
[2].z
;
716 vb
[14].y
= layer
->colors
[2].w
;
720 vb
[16].x
= layer
->src
.tl
.x
;
721 vb
[16].y
= layer
->src
.br
.y
;
723 vb
[18].x
= layer
->colors
[3].x
;
724 vb
[18].y
= layer
->colors
[3].y
;
725 vb
[19].x
= layer
->colors
[3].z
;
726 vb
[19].y
= layer
->colors
[3].w
;
729 static inline struct u_rect
730 calc_drawn_area(struct vl_compositor_state
*s
, struct vl_compositor_layer
*layer
)
732 struct vertex2f tl
, br
;
733 struct u_rect result
;
738 switch (layer
->rotate
) {
740 case VL_COMPOSITOR_ROTATE_0
:
744 case VL_COMPOSITOR_ROTATE_90
:
745 tl
.x
= layer
->dst
.br
.x
;
746 tl
.y
= layer
->dst
.tl
.y
;
747 br
.x
= layer
->dst
.tl
.x
;
748 br
.y
= layer
->dst
.br
.y
;
750 case VL_COMPOSITOR_ROTATE_180
:
754 case VL_COMPOSITOR_ROTATE_270
:
755 tl
.x
= layer
->dst
.tl
.x
;
756 tl
.y
= layer
->dst
.br
.y
;
757 br
.x
= layer
->dst
.br
.x
;
758 br
.y
= layer
->dst
.tl
.y
;
763 result
.x0
= tl
.x
* layer
->viewport
.scale
[0] + layer
->viewport
.translate
[0];
764 result
.y0
= tl
.y
* layer
->viewport
.scale
[1] + layer
->viewport
.translate
[1];
765 result
.x1
= br
.x
* layer
->viewport
.scale
[0] + layer
->viewport
.translate
[0];
766 result
.y1
= br
.y
* layer
->viewport
.scale
[1] + layer
->viewport
.translate
[1];
769 result
.x0
= MAX2(result
.x0
, s
->scissor
.minx
);
770 result
.y0
= MAX2(result
.y0
, s
->scissor
.miny
);
771 result
.x1
= MIN2(result
.x1
, s
->scissor
.maxx
);
772 result
.y1
= MIN2(result
.y1
, s
->scissor
.maxy
);
777 gen_vertex_data(struct vl_compositor
*c
, struct vl_compositor_state
*s
, struct u_rect
*dirty
)
784 /* Allocate new memory for vertices. */
785 u_upload_alloc(c
->upload
, 0,
786 c
->vertex_buf
.stride
* VL_COMPOSITOR_MAX_LAYERS
* 4, /* size */
788 &c
->vertex_buf
.buffer_offset
, &c
->vertex_buf
.buffer
,
791 for (i
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; i
++) {
792 if (s
->used_layers
& (1 << i
)) {
793 struct vl_compositor_layer
*layer
= &s
->layers
[i
];
794 gen_rect_verts(vb
, layer
);
797 if (!layer
->viewport_valid
) {
798 layer
->viewport
.scale
[0] = c
->fb_state
.width
;
799 layer
->viewport
.scale
[1] = c
->fb_state
.height
;
800 layer
->viewport
.translate
[0] = 0;
801 layer
->viewport
.translate
[1] = 0;
804 if (dirty
&& layer
->clearing
) {
805 struct u_rect drawn
= calc_drawn_area(s
, layer
);
807 dirty
->x0
>= drawn
.x0
&&
808 dirty
->y0
>= drawn
.y0
&&
809 dirty
->x1
<= drawn
.x1
&&
810 dirty
->y1
<= drawn
.y1
) {
812 // We clear the dirty area anyway, no need for clear_render_target
813 dirty
->x0
= dirty
->y0
= MAX_DIRTY
;
814 dirty
->x1
= dirty
->y1
= MIN_DIRTY
;
820 u_upload_unmap(c
->upload
);
824 draw_layers(struct vl_compositor
*c
, struct vl_compositor_state
*s
, struct u_rect
*dirty
)
826 unsigned vb_index
, i
;
830 for (i
= 0, vb_index
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
) {
831 if (s
->used_layers
& (1 << i
)) {
832 struct vl_compositor_layer
*layer
= &s
->layers
[i
];
833 struct pipe_sampler_view
**samplers
= &layer
->sampler_views
[0];
834 unsigned num_sampler_views
= !samplers
[1] ? 1 : !samplers
[2] ? 2 : 3;
835 void *blend
= layer
->blend
? layer
->blend
: i
? c
->blend_add
: c
->blend_clear
;
837 c
->pipe
->bind_blend_state(c
->pipe
, blend
);
838 c
->pipe
->set_viewport_states(c
->pipe
, 0, 1, &layer
->viewport
);
839 c
->pipe
->bind_fs_state(c
->pipe
, layer
->fs
);
840 c
->pipe
->bind_sampler_states(c
->pipe
, PIPE_SHADER_FRAGMENT
, 0,
841 num_sampler_views
, layer
->samplers
);
842 c
->pipe
->set_sampler_views(c
->pipe
, PIPE_SHADER_FRAGMENT
, 0,
843 num_sampler_views
, samplers
);
845 util_draw_arrays(c
->pipe
, PIPE_PRIM_QUADS
, vb_index
* 4, 4);
849 // Remember the currently drawn area as dirty for the next draw command
850 struct u_rect drawn
= calc_drawn_area(s
, layer
);
851 dirty
->x0
= MIN2(drawn
.x0
, dirty
->x0
);
852 dirty
->y0
= MIN2(drawn
.y0
, dirty
->y0
);
853 dirty
->x1
= MAX2(drawn
.x1
, dirty
->x1
);
854 dirty
->y1
= MAX2(drawn
.y1
, dirty
->y1
);
861 vl_compositor_reset_dirty_area(struct u_rect
*dirty
)
865 dirty
->x0
= dirty
->y0
= MIN_DIRTY
;
866 dirty
->x1
= dirty
->y1
= MAX_DIRTY
;
870 vl_compositor_set_clear_color(struct vl_compositor_state
*s
, union pipe_color_union
*color
)
875 s
->clear_color
= *color
;
879 vl_compositor_get_clear_color(struct vl_compositor_state
*s
, union pipe_color_union
*color
)
884 *color
= s
->clear_color
;
888 vl_compositor_clear_layers(struct vl_compositor_state
*s
)
895 for ( i
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
) {
896 struct vertex4f v_one
= { 1.0f
, 1.0f
, 1.0f
, 1.0f
};
897 s
->layers
[i
].clearing
= i
? false : true;
898 s
->layers
[i
].blend
= NULL
;
899 s
->layers
[i
].fs
= NULL
;
900 s
->layers
[i
].viewport
.scale
[2] = 1;
901 s
->layers
[i
].viewport
.translate
[2] = 0;
902 s
->layers
[i
].rotate
= VL_COMPOSITOR_ROTATE_0
;
904 for ( j
= 0; j
< 3; j
++)
905 pipe_sampler_view_reference(&s
->layers
[i
].sampler_views
[j
], NULL
);
906 for ( j
= 0; j
< 4; ++j
)
907 s
->layers
[i
].colors
[j
] = v_one
;
912 vl_compositor_cleanup(struct vl_compositor
*c
)
916 u_upload_destroy(c
->upload
);
919 cleanup_pipe_state(c
);
923 vl_compositor_set_csc_matrix(struct vl_compositor_state
*s
,
924 vl_csc_matrix
const *matrix
,
925 float luma_min
, float luma_max
)
927 struct pipe_transfer
*buf_transfer
;
931 float *ptr
= pipe_buffer_map(s
->pipe
, s
->csc_matrix
,
932 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
,
938 memcpy(ptr
, matrix
, sizeof(vl_csc_matrix
));
940 ptr
+= sizeof(vl_csc_matrix
)/sizeof(float);
944 pipe_buffer_unmap(s
->pipe
, buf_transfer
);
950 vl_compositor_set_dst_clip(struct vl_compositor_state
*s
, struct u_rect
*dst_clip
)
954 s
->scissor_valid
= dst_clip
!= NULL
;
956 s
->scissor
.minx
= dst_clip
->x0
;
957 s
->scissor
.miny
= dst_clip
->y0
;
958 s
->scissor
.maxx
= dst_clip
->x1
;
959 s
->scissor
.maxy
= dst_clip
->y1
;
964 vl_compositor_set_layer_blend(struct vl_compositor_state
*s
,
965 unsigned layer
, void *blend
,
970 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
972 s
->layers
[layer
].clearing
= is_clearing
;
973 s
->layers
[layer
].blend
= blend
;
977 vl_compositor_set_layer_dst_area(struct vl_compositor_state
*s
,
978 unsigned layer
, struct u_rect
*dst_area
)
982 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
984 s
->layers
[layer
].viewport_valid
= dst_area
!= NULL
;
986 s
->layers
[layer
].viewport
.scale
[0] = dst_area
->x1
- dst_area
->x0
;
987 s
->layers
[layer
].viewport
.scale
[1] = dst_area
->y1
- dst_area
->y0
;
988 s
->layers
[layer
].viewport
.translate
[0] = dst_area
->x0
;
989 s
->layers
[layer
].viewport
.translate
[1] = dst_area
->y0
;
994 vl_compositor_set_buffer_layer(struct vl_compositor_state
*s
,
995 struct vl_compositor
*c
,
997 struct pipe_video_buffer
*buffer
,
998 struct u_rect
*src_rect
,
999 struct u_rect
*dst_rect
,
1000 enum vl_compositor_deinterlace deinterlace
)
1002 struct pipe_sampler_view
**sampler_views
;
1005 assert(s
&& c
&& buffer
);
1007 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
1009 s
->used_layers
|= 1 << layer
;
1010 sampler_views
= buffer
->get_sampler_view_components(buffer
);
1011 for (i
= 0; i
< 3; ++i
) {
1012 s
->layers
[layer
].samplers
[i
] = c
->sampler_linear
;
1013 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[i
], sampler_views
[i
]);
1016 calc_src_and_dst(&s
->layers
[layer
], buffer
->width
, buffer
->height
,
1017 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
1018 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
1020 if (buffer
->interlaced
) {
1021 float half_a_line
= 0.5f
/ s
->layers
[layer
].zw
.y
;
1022 switch(deinterlace
) {
1023 case VL_COMPOSITOR_WEAVE
:
1024 s
->layers
[layer
].fs
= c
->fs_weave_rgb
;
1027 case VL_COMPOSITOR_BOB_TOP
:
1028 s
->layers
[layer
].zw
.x
= 0.0f
;
1029 s
->layers
[layer
].src
.tl
.y
+= half_a_line
;
1030 s
->layers
[layer
].src
.br
.y
+= half_a_line
;
1031 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
1034 case VL_COMPOSITOR_BOB_BOTTOM
:
1035 s
->layers
[layer
].zw
.x
= 1.0f
;
1036 s
->layers
[layer
].src
.tl
.y
-= half_a_line
;
1037 s
->layers
[layer
].src
.br
.y
-= half_a_line
;
1038 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
1043 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
1047 vl_compositor_set_palette_layer(struct vl_compositor_state
*s
,
1048 struct vl_compositor
*c
,
1050 struct pipe_sampler_view
*indexes
,
1051 struct pipe_sampler_view
*palette
,
1052 struct u_rect
*src_rect
,
1053 struct u_rect
*dst_rect
,
1054 bool include_color_conversion
)
1056 assert(s
&& c
&& indexes
&& palette
);
1058 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
1060 s
->used_layers
|= 1 << layer
;
1062 s
->layers
[layer
].fs
= include_color_conversion
?
1063 c
->fs_palette
.yuv
: c
->fs_palette
.rgb
;
1065 s
->layers
[layer
].samplers
[0] = c
->sampler_linear
;
1066 s
->layers
[layer
].samplers
[1] = c
->sampler_nearest
;
1067 s
->layers
[layer
].samplers
[2] = NULL
;
1068 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[0], indexes
);
1069 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[1], palette
);
1070 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[2], NULL
);
1071 calc_src_and_dst(&s
->layers
[layer
], indexes
->texture
->width0
, indexes
->texture
->height0
,
1072 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
1073 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
1077 vl_compositor_set_rgba_layer(struct vl_compositor_state
*s
,
1078 struct vl_compositor
*c
,
1080 struct pipe_sampler_view
*rgba
,
1081 struct u_rect
*src_rect
,
1082 struct u_rect
*dst_rect
,
1083 struct vertex4f
*colors
)
1087 assert(s
&& c
&& rgba
);
1089 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
1091 s
->used_layers
|= 1 << layer
;
1092 s
->layers
[layer
].fs
= c
->fs_rgba
;
1093 s
->layers
[layer
].samplers
[0] = c
->sampler_linear
;
1094 s
->layers
[layer
].samplers
[1] = NULL
;
1095 s
->layers
[layer
].samplers
[2] = NULL
;
1096 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[0], rgba
);
1097 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[1], NULL
);
1098 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[2], NULL
);
1099 calc_src_and_dst(&s
->layers
[layer
], rgba
->texture
->width0
, rgba
->texture
->height0
,
1100 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
1101 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
1104 for (i
= 0; i
< 4; ++i
)
1105 s
->layers
[layer
].colors
[i
] = colors
[i
];
1109 vl_compositor_set_layer_rotation(struct vl_compositor_state
*s
,
1111 enum vl_compositor_rotation rotate
)
1114 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
1115 s
->layers
[layer
].rotate
= rotate
;
1119 vl_compositor_set_yuv_layer(struct vl_compositor_state
*s
,
1120 struct vl_compositor
*c
,
1122 struct pipe_video_buffer
*buffer
,
1123 struct u_rect
*src_rect
,
1124 struct u_rect
*dst_rect
,
1127 struct pipe_sampler_view
**sampler_views
;
1130 assert(s
&& c
&& buffer
);
1132 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
1134 s
->used_layers
|= 1 << layer
;
1135 sampler_views
= buffer
->get_sampler_view_components(buffer
);
1136 for (i
= 0; i
< 3; ++i
) {
1137 s
->layers
[layer
].samplers
[i
] = c
->sampler_linear
;
1138 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[i
], sampler_views
[i
]);
1141 calc_src_and_dst(&s
->layers
[layer
], buffer
->width
, buffer
->height
,
1142 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
1143 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
1145 s
->layers
[layer
].fs
= (y
) ? c
->fs_weave_yuv
.y
: c
->fs_weave_yuv
.uv
;
1149 vl_compositor_render(struct vl_compositor_state
*s
,
1150 struct vl_compositor
*c
,
1151 struct pipe_surface
*dst_surface
,
1152 struct u_rect
*dirty_area
,
1156 assert(dst_surface
);
1158 c
->fb_state
.width
= dst_surface
->width
;
1159 c
->fb_state
.height
= dst_surface
->height
;
1160 c
->fb_state
.cbufs
[0] = dst_surface
;
1162 if (!s
->scissor_valid
) {
1163 s
->scissor
.minx
= 0;
1164 s
->scissor
.miny
= 0;
1165 s
->scissor
.maxx
= dst_surface
->width
;
1166 s
->scissor
.maxy
= dst_surface
->height
;
1168 c
->pipe
->set_scissor_states(c
->pipe
, 0, 1, &s
->scissor
);
1170 gen_vertex_data(c
, s
, dirty_area
);
1172 if (clear_dirty
&& dirty_area
&&
1173 (dirty_area
->x0
< dirty_area
->x1
|| dirty_area
->y0
< dirty_area
->y1
)) {
1175 c
->pipe
->clear_render_target(c
->pipe
, dst_surface
, &s
->clear_color
,
1176 0, 0, dst_surface
->width
, dst_surface
->height
, false);
1177 dirty_area
->x0
= dirty_area
->y0
= MAX_DIRTY
;
1178 dirty_area
->x1
= dirty_area
->y1
= MIN_DIRTY
;
1181 c
->pipe
->set_framebuffer_state(c
->pipe
, &c
->fb_state
);
1182 c
->pipe
->bind_vs_state(c
->pipe
, c
->vs
);
1183 c
->pipe
->set_vertex_buffers(c
->pipe
, 0, 1, &c
->vertex_buf
);
1184 c
->pipe
->bind_vertex_elements_state(c
->pipe
, c
->vertex_elems_state
);
1185 pipe_set_constant_buffer(c
->pipe
, PIPE_SHADER_FRAGMENT
, 0, s
->csc_matrix
);
1186 c
->pipe
->bind_rasterizer_state(c
->pipe
, c
->rast
);
1188 draw_layers(c
, s
, dirty_area
);
1192 vl_compositor_init(struct vl_compositor
*c
, struct pipe_context
*pipe
)
1196 memset(c
, 0, sizeof(*c
));
1200 c
->upload
= u_upload_create(pipe
, 128 * 1024, PIPE_BIND_VERTEX_BUFFER
,
1206 if (!init_pipe_state(c
)) {
1207 u_upload_destroy(c
->upload
);
1211 if (!init_shaders(c
)) {
1212 u_upload_destroy(c
->upload
);
1213 cleanup_pipe_state(c
);
1217 if (!init_buffers(c
)) {
1218 u_upload_destroy(c
->upload
);
1220 cleanup_pipe_state(c
);
1228 vl_compositor_init_state(struct vl_compositor_state
*s
, struct pipe_context
*pipe
)
1230 vl_csc_matrix csc_matrix
;
1234 memset(s
, 0, sizeof(*s
));
1238 s
->clear_color
.f
[0] = s
->clear_color
.f
[1] = 0.0f
;
1239 s
->clear_color
.f
[2] = s
->clear_color
.f
[3] = 0.0f
;
1242 * Create our fragment shader's constant buffer
1243 * Const buffer contains the color conversion matrix and bias vectors
1245 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
1246 s
->csc_matrix
= pipe_buffer_create
1249 PIPE_BIND_CONSTANT_BUFFER
,
1251 sizeof(csc_matrix
) + 2*sizeof(float)
1257 vl_compositor_clear_layers(s
);
1259 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY
, NULL
, true, &csc_matrix
);
1260 if (!vl_compositor_set_csc_matrix(s
, (const vl_csc_matrix
*)&csc_matrix
, 1.0f
, 0.0f
))
1267 vl_compositor_cleanup_state(struct vl_compositor_state
*s
)
1271 vl_compositor_clear_layers(s
);
1272 pipe_resource_reference(&s
->csc_matrix
, NULL
);