vl/compositor: make vl_compositor_set_yuv_layer() static
[mesa.git] / src / gallium / auxiliary / vl / vl_compositor.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include "pipe/p_compiler.h"
31 #include "pipe/p_context.h"
32
33 #include "util/u_memory.h"
34 #include "util/u_draw.h"
35 #include "util/u_surface.h"
36 #include "util/u_upload_mgr.h"
37
38 #include "tgsi/tgsi_ureg.h"
39
40 #include "vl_csc.h"
41 #include "vl_types.h"
42 #include "vl_compositor.h"
43
44 #define MIN_DIRTY (0)
45 #define MAX_DIRTY (1 << 15)
46
47 enum VS_OUTPUT
48 {
49 VS_O_VPOS = 0,
50 VS_O_COLOR = 0,
51 VS_O_VTEX = 0,
52 VS_O_VTOP,
53 VS_O_VBOTTOM,
54 };
55
56 static void *
57 create_vert_shader(struct vl_compositor *c)
58 {
59 struct ureg_program *shader;
60 struct ureg_src vpos, vtex, color;
61 struct ureg_dst tmp;
62 struct ureg_dst o_vpos, o_vtex, o_color;
63 struct ureg_dst o_vtop, o_vbottom;
64
65 shader = ureg_create(PIPE_SHADER_VERTEX);
66 if (!shader)
67 return false;
68
69 vpos = ureg_DECL_vs_input(shader, 0);
70 vtex = ureg_DECL_vs_input(shader, 1);
71 color = ureg_DECL_vs_input(shader, 2);
72 tmp = ureg_DECL_temporary(shader);
73 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
74 o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
75 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
76 o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
77 o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);
78
79 /*
80 * o_vpos = vpos
81 * o_vtex = vtex
82 * o_color = color
83 */
84 ureg_MOV(shader, o_vpos, vpos);
85 ureg_MOV(shader, o_vtex, vtex);
86 ureg_MOV(shader, o_color, color);
87
88 /*
89 * tmp.x = vtex.w / 2
90 * tmp.y = vtex.w / 4
91 *
92 * o_vtop.x = vtex.x
93 * o_vtop.y = vtex.y * tmp.x + 0.25f
94 * o_vtop.z = vtex.y * tmp.y + 0.25f
95 * o_vtop.w = 1 / tmp.x
96 *
97 * o_vbottom.x = vtex.x
98 * o_vbottom.y = vtex.y * tmp.x - 0.25f
99 * o_vbottom.z = vtex.y * tmp.y - 0.25f
100 * o_vbottom.w = 1 / tmp.y
101 */
102 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
103 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
104 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
105 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));
106
107 ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
108 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
109 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
110 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
111 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
112 ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
113 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
114
115 ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
116 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
117 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
118 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
119 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
120 ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
121 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
122
123 ureg_END(shader);
124
125 return ureg_create_shader_and_destroy(shader, c->pipe);
126 }
127
/* Emit TGSI that weaves the two fields of an interlaced YCbCr surface back
 * into a single progressive texel.
 *
 * Consumes the VS_O_VTOP / VS_O_VBOTTOM coordinates produced by
 * create_vert_shader(), fetches the three planes from 2D-array samplers
 * (the W coordinate selects array layer 0 or 1 -- top or bottom field) and
 * linearly blends the two fetched texels into 'fragment'.
 */
static void
create_frag_shader_weave(struct ureg_program *shader, struct ureg_dst fragment)
{
   struct ureg_src i_tc[2];
   struct ureg_src sampler[3];
   struct ureg_dst t_tc[2];
   struct ureg_dst t_texel[2];
   unsigned i, j;

   i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   /* one sampler per plane */
   for (i = 0; i < 3; ++i) {
      sampler[i] = ureg_DECL_sampler(shader, i);
      ureg_DECL_sampler_view(shader, i, TGSI_TEXTURE_2D_ARRAY,
                             TGSI_RETURN_TYPE_FLOAT,
                             TGSI_RETURN_TYPE_FLOAT,
                             TGSI_RETURN_TYPE_FLOAT,
                             TGSI_RETURN_TYPE_FLOAT);
   }

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      t_texel[i] = ureg_DECL_temporary(shader);
   }

   /* calculate the texture offsets
    * t_tc.x = i_tc.x
    * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
    */
   for (i = 0; i < 2; ++i) {
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               i_tc[i], ureg_imm1f(shader, -0.5f));
      ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
      /* W selects the array layer: 0 = first field, 1 = second field */
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
               ureg_imm1f(shader, i ? 1.0f : 0.0f));
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
      /* i_tc[0].w and i_tc[1].w hold the reciprocal heights computed by
       * the vertex shader (1/tmp.x and 1/tmp.y) */
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
   }

   /* fetch the texels
    * texel[0..1].x = tex(t_tc[0..1][0])
    * texel[0..1].y = tex(t_tc[0..1][1])
    * texel[0..1].z = tex(t_tc[0..1][2])
    */
   for (i = 0; i < 2; ++i)
      for (j = 0; j < 3; ++j) {
         /* plane 0 (luma) uses the Y coordinate, chroma planes use Z */
         struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
            TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

         ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
                  TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
      }

   /* calculate linear interpolation factor
    * factor = |round(i_tc.y) - i_tc.y| * 2
    */
   ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
   ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
   ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
   /* blend the two field texels with the per-component factor */
   ureg_LRP(shader, fragment, ureg_swizzle(ureg_src(t_tc[0]),
            TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
            ureg_src(t_texel[0]), ureg_src(t_texel[1]));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_texel[i]);
      ureg_release_temporary(shader, t_tc[i]);
   }
}
204
/* Emit TGSI that converts 'texel' (YCbCr, alpha forced to 1.0) to RGB via
 * the color-space-conversion matrix in constants 0..2, and derives the
 * fragment alpha from luma keying: alpha is 1 unless the luma value lies
 * strictly inside the (lumakey.x, lumakey.y] range (bounds in constant 3).
 */
static void
create_frag_shader_csc(struct ureg_program *shader, struct ureg_dst texel,
                       struct ureg_dst fragment)
{
   struct ureg_src csc[3];
   struct ureg_src lumakey;
   struct ureg_dst temp[2];
   unsigned i;

   /* constants 0..2: rows of the CSC matrix */
   for (i = 0; i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   /* constant 3: luma-key min/max in .x/.y */
   lumakey = ureg_DECL_constant(shader, 3);

   for (i = 0; i < 2; ++i)
      temp[i] = ureg_DECL_temporary(shader);

   /* force texel.w = 1 so the matrix rows' translation column applies */
   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W),
            ureg_imm1f(shader, 1.0f));

   /* fragment.xyz = csc * texel (one dot product per row) */
   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i],
               ureg_src(texel));

   /* luma keying: alpha = (luma <= min) || (luma > max) ? 1 : 0 */
   ureg_MOV(shader, ureg_writemask(temp[0], TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(texel), TGSI_SWIZZLE_Z));
   ureg_SLE(shader, ureg_writemask(temp[1],TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_X));
   ureg_SGT(shader, ureg_writemask(temp[0],TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_Y));
   ureg_MAX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_src(temp[1]));

   for (i = 0; i < 2; ++i)
      ureg_release_temporary(shader, temp[i]);
}
241
242 static void *
243 create_frag_shader_video_buffer(struct vl_compositor *c)
244 {
245 struct ureg_program *shader;
246 struct ureg_src tc;
247 struct ureg_src sampler[3];
248 struct ureg_dst texel;
249 struct ureg_dst fragment;
250 unsigned i;
251
252 shader = ureg_create(PIPE_SHADER_FRAGMENT);
253 if (!shader)
254 return false;
255
256 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
257 for (i = 0; i < 3; ++i) {
258 sampler[i] = ureg_DECL_sampler(shader, i);
259 ureg_DECL_sampler_view(shader, i, TGSI_TEXTURE_2D_ARRAY,
260 TGSI_RETURN_TYPE_FLOAT,
261 TGSI_RETURN_TYPE_FLOAT,
262 TGSI_RETURN_TYPE_FLOAT,
263 TGSI_RETURN_TYPE_FLOAT);
264 }
265
266 texel = ureg_DECL_temporary(shader);
267 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
268
269 /*
270 * texel.xyz = tex(tc, sampler[i])
271 * fragment = csc * texel
272 */
273 for (i = 0; i < 3; ++i)
274 ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);
275
276 create_frag_shader_csc(shader, texel, fragment);
277
278 ureg_release_temporary(shader, texel);
279 ureg_END(shader);
280
281 return ureg_create_shader_and_destroy(shader, c->pipe);
282 }
283
284 static void *
285 create_frag_shader_weave_rgb(struct vl_compositor *c)
286 {
287 struct ureg_program *shader;
288 struct ureg_dst texel, fragment;
289
290 shader = ureg_create(PIPE_SHADER_FRAGMENT);
291 if (!shader)
292 return false;
293
294 texel = ureg_DECL_temporary(shader);
295 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
296
297 create_frag_shader_weave(shader, texel);
298 create_frag_shader_csc(shader, texel, fragment);
299
300 ureg_release_temporary(shader, texel);
301
302 ureg_END(shader);
303
304 return ureg_create_shader_and_destroy(shader, c->pipe);
305 }
306
307 static void *
308 create_frag_shader_weave_yuv(struct vl_compositor *c, bool y)
309 {
310 struct ureg_program *shader;
311 struct ureg_dst texel, fragment;
312
313 shader = ureg_create(PIPE_SHADER_FRAGMENT);
314 if (!shader)
315 return false;
316
317 texel = ureg_DECL_temporary(shader);
318 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
319
320 create_frag_shader_weave(shader, texel);
321
322 if (y)
323 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), ureg_src(texel));
324 else
325 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XY),
326 ureg_swizzle(ureg_src(texel), TGSI_SWIZZLE_Y,
327 TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W));
328
329 ureg_release_temporary(shader, texel);
330
331 ureg_END(shader);
332
333 return ureg_create_shader_and_destroy(shader, c->pipe);
334 }
335
336 static void *
337 create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
338 {
339 struct ureg_program *shader;
340 struct ureg_src csc[3];
341 struct ureg_src tc;
342 struct ureg_src sampler;
343 struct ureg_src palette;
344 struct ureg_dst texel;
345 struct ureg_dst fragment;
346 unsigned i;
347
348 shader = ureg_create(PIPE_SHADER_FRAGMENT);
349 if (!shader)
350 return false;
351
352 for (i = 0; include_cc && i < 3; ++i)
353 csc[i] = ureg_DECL_constant(shader, i);
354
355 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
356 sampler = ureg_DECL_sampler(shader, 0);
357 ureg_DECL_sampler_view(shader, 0, TGSI_TEXTURE_2D,
358 TGSI_RETURN_TYPE_FLOAT,
359 TGSI_RETURN_TYPE_FLOAT,
360 TGSI_RETURN_TYPE_FLOAT,
361 TGSI_RETURN_TYPE_FLOAT);
362 palette = ureg_DECL_sampler(shader, 1);
363 ureg_DECL_sampler_view(shader, 1, TGSI_TEXTURE_1D,
364 TGSI_RETURN_TYPE_FLOAT,
365 TGSI_RETURN_TYPE_FLOAT,
366 TGSI_RETURN_TYPE_FLOAT,
367 TGSI_RETURN_TYPE_FLOAT);
368
369 texel = ureg_DECL_temporary(shader);
370 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
371
372 /*
373 * texel = tex(tc, sampler)
374 * fragment.xyz = tex(texel, palette) * csc
375 * fragment.a = texel.a
376 */
377 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
378 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));
379
380 if (include_cc) {
381 ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
382 for (i = 0; i < 3; ++i)
383 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
384 } else {
385 ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
386 TGSI_TEXTURE_1D, ureg_src(texel), palette);
387 }
388
389 ureg_release_temporary(shader, texel);
390 ureg_END(shader);
391
392 return ureg_create_shader_and_destroy(shader, c->pipe);
393 }
394
395 static void *
396 create_frag_shader_rgba(struct vl_compositor *c)
397 {
398 struct ureg_program *shader;
399 struct ureg_src tc, color, sampler;
400 struct ureg_dst texel, fragment;
401
402 shader = ureg_create(PIPE_SHADER_FRAGMENT);
403 if (!shader)
404 return false;
405
406 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
407 color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
408 sampler = ureg_DECL_sampler(shader, 0);
409 ureg_DECL_sampler_view(shader, 0, TGSI_TEXTURE_2D,
410 TGSI_RETURN_TYPE_FLOAT,
411 TGSI_RETURN_TYPE_FLOAT,
412 TGSI_RETURN_TYPE_FLOAT,
413 TGSI_RETURN_TYPE_FLOAT);
414 texel = ureg_DECL_temporary(shader);
415 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
416
417 /*
418 * fragment = tex(tc, sampler)
419 */
420 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
421 ureg_MUL(shader, fragment, ureg_src(texel), color);
422 ureg_END(shader);
423
424 return ureg_create_shader_and_destroy(shader, c->pipe);
425 }
426
427 static bool
428 init_shaders(struct vl_compositor *c)
429 {
430 assert(c);
431
432 c->vs = create_vert_shader(c);
433 if (!c->vs) {
434 debug_printf("Unable to create vertex shader.\n");
435 return false;
436 }
437
438 c->fs_video_buffer = create_frag_shader_video_buffer(c);
439 if (!c->fs_video_buffer) {
440 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
441 return false;
442 }
443
444 c->fs_weave_rgb = create_frag_shader_weave_rgb(c);
445 if (!c->fs_weave_rgb) {
446 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
447 return false;
448 }
449
450 c->fs_weave_yuv.y = create_frag_shader_weave_yuv(c, true);
451 c->fs_weave_yuv.uv = create_frag_shader_weave_yuv(c, false);
452 if (!c->fs_weave_yuv.y || !c->fs_weave_yuv.uv) {
453 debug_printf("Unable to create YCbCr i-to-YCbCr p weave fragment shader.\n");
454 return false;
455 }
456
457 c->fs_palette.yuv = create_frag_shader_palette(c, true);
458 if (!c->fs_palette.yuv) {
459 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
460 return false;
461 }
462
463 c->fs_palette.rgb = create_frag_shader_palette(c, false);
464 if (!c->fs_palette.rgb) {
465 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
466 return false;
467 }
468
469 c->fs_rgba = create_frag_shader_rgba(c);
470 if (!c->fs_rgba) {
471 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
472 return false;
473 }
474
475 return true;
476 }
477
478 static void cleanup_shaders(struct vl_compositor *c)
479 {
480 assert(c);
481
482 c->pipe->delete_vs_state(c->pipe, c->vs);
483 c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
484 c->pipe->delete_fs_state(c->pipe, c->fs_weave_rgb);
485 c->pipe->delete_fs_state(c->pipe, c->fs_weave_yuv.y);
486 c->pipe->delete_fs_state(c->pipe, c->fs_weave_yuv.uv);
487 c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
488 c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
489 c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
490 }
491
/* Create the fixed pipe state objects (samplers, blend states, rasterizer,
 * depth/stencil/alpha) used for all compositor drawing.
 *
 * Always returns true; CSO creation results are not individually checked
 * here.
 */
static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_depth_stencil_alpha_state dsa;
   unsigned i;

   assert(c);

   /* one color buffer, no depth/stencil surface */
   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* same settings with nearest filtering */
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* blend_clear: blending off, straight overwrite of the render target */
   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);

   /* blend_add: standard src-alpha / inv-src-alpha compositing */
   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);

   /* rasterizer: no culling, filled polygons, scissor enabled so the dirty
    * area / dst clip can take effect */
   memset(&rast, 0, sizeof rast);
   rast.flatshade = 0;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.half_pixel_center = 1;
   rast.bottom_edge_rule = 1;
   rast.depth_clip = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   /* depth/stencil/alpha: everything disabled / pass-through */
   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
   c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);

   return true;
}
579
580 static void cleanup_pipe_state(struct vl_compositor *c)
581 {
582 assert(c);
583
584 /* Asserted in softpipe_delete_fs_state() for some reason */
585 c->pipe->bind_vs_state(c->pipe, NULL);
586 c->pipe->bind_fs_state(c->pipe, NULL);
587
588 c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
589 c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
590 c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
591 c->pipe->delete_blend_state(c->pipe, c->blend_clear);
592 c->pipe->delete_blend_state(c->pipe, c->blend_add);
593 c->pipe->delete_rasterizer_state(c->pipe, c->rast);
594 }
595
596 static bool
597 init_buffers(struct vl_compositor *c)
598 {
599 struct pipe_vertex_element vertex_elems[3];
600
601 assert(c);
602
603 /*
604 * Create our vertex buffer and vertex buffer elements
605 */
606 c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
607 c->vertex_buf.buffer_offset = 0;
608 c->vertex_buf.buffer.resource = NULL;
609 c->vertex_buf.is_user_buffer = false;
610
611 vertex_elems[0].src_offset = 0;
612 vertex_elems[0].instance_divisor = 0;
613 vertex_elems[0].vertex_buffer_index = 0;
614 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
615 vertex_elems[1].src_offset = sizeof(struct vertex2f);
616 vertex_elems[1].instance_divisor = 0;
617 vertex_elems[1].vertex_buffer_index = 0;
618 vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
619 vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
620 vertex_elems[2].instance_divisor = 0;
621 vertex_elems[2].vertex_buffer_index = 0;
622 vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
623 c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);
624
625 return true;
626 }
627
628 static void
629 cleanup_buffers(struct vl_compositor *c)
630 {
631 assert(c);
632
633 c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
634 pipe_resource_reference(&c->vertex_buf.buffer.resource, NULL);
635 }
636
637 static inline struct u_rect
638 default_rect(struct vl_compositor_layer *layer)
639 {
640 struct pipe_resource *res = layer->sampler_views[0]->texture;
641 struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
642 return rect;
643 }
644
645 static inline struct vertex2f
646 calc_topleft(struct vertex2f size, struct u_rect rect)
647 {
648 struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
649 return res;
650 }
651
652 static inline struct vertex2f
653 calc_bottomright(struct vertex2f size, struct u_rect rect)
654 {
655 struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
656 return res;
657 }
658
659 static inline void
660 calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
661 struct u_rect src, struct u_rect dst)
662 {
663 struct vertex2f size = { width, height };
664
665 layer->src.tl = calc_topleft(size, src);
666 layer->src.br = calc_bottomright(size, src);
667 layer->dst.tl = calc_topleft(size, dst);
668 layer->dst.br = calc_bottomright(size, dst);
669 layer->zw.x = 0.0f;
670 layer->zw.y = size.y;
671 }
672
/* Write one quad (4 vertices) for 'layer' into the vertex buffer.
 *
 * Each vertex occupies five vertex2f slots, matching the vertex element
 * layout from init_buffers():
 *   [0] position, [1] texcoords, [2] zw, [3] color.xy, [4] color.zw
 * The destination corners are permuted according to layer->rotate; the
 * texture coordinates always stay in the unrotated tl/br order.
 */
static void
gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, tr, br, bl;

   assert(vb && layer);

   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      tr.x = layer->dst.br.x;
      tr.y = layer->dst.tl.y;
      br = layer->dst.br;
      bl.x = layer->dst.tl.x;
      bl.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      tr = layer->dst.br;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      bl = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      tr.x = layer->dst.tl.x;
      tr.y = layer->dst.br.y;
      br = layer->dst.tl;
      bl.x = layer->dst.br.x;
      bl.y = layer->dst.tl.y;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      tr = layer->dst.tl;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      bl = layer->dst.br;
      break;
   }

   /* vertex 0: top-left */
   vb[ 0].x = tl.x;
   vb[ 0].y = tl.y;
   vb[ 1].x = layer->src.tl.x;
   vb[ 1].y = layer->src.tl.y;
   vb[ 2] = layer->zw;
   vb[ 3].x = layer->colors[0].x;
   vb[ 3].y = layer->colors[0].y;
   vb[ 4].x = layer->colors[0].z;
   vb[ 4].y = layer->colors[0].w;

   /* vertex 1: top-right */
   vb[ 5].x = tr.x;
   vb[ 5].y = tr.y;
   vb[ 6].x = layer->src.br.x;
   vb[ 6].y = layer->src.tl.y;
   vb[ 7] = layer->zw;
   vb[ 8].x = layer->colors[1].x;
   vb[ 8].y = layer->colors[1].y;
   vb[ 9].x = layer->colors[1].z;
   vb[ 9].y = layer->colors[1].w;

   /* vertex 2: bottom-right */
   vb[10].x = br.x;
   vb[10].y = br.y;
   vb[11].x = layer->src.br.x;
   vb[11].y = layer->src.br.y;
   vb[12] = layer->zw;
   vb[13].x = layer->colors[2].x;
   vb[13].y = layer->colors[2].y;
   vb[14].x = layer->colors[2].z;
   vb[14].y = layer->colors[2].w;

   /* vertex 3: bottom-left */
   vb[15].x = bl.x;
   vb[15].y = bl.y;
   vb[16].x = layer->src.tl.x;
   vb[16].y = layer->src.br.y;
   vb[17] = layer->zw;
   vb[18].x = layer->colors[3].x;
   vb[18].y = layer->colors[3].y;
   vb[19].x = layer->colors[3].z;
   vb[19].y = layer->colors[3].w;
}
756
/* Compute the screen-space rectangle a layer will actually cover:
 * rotate the normalized destination corners, scale/translate them through
 * the layer's viewport, then clip against the state's scissor. */
static inline struct u_rect
calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, br;
   struct u_rect result;

   assert(s && layer);

   // rotate
   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      br = layer->dst.br;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      br = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      break;
   }

   // scale
   result.x0 = tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y0 = tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
   result.x1 = br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y1 = br.y * layer->viewport.scale[1] + layer->viewport.translate[1];

   // and clip
   result.x0 = MAX2(result.x0, s->scissor.minx);
   result.y0 = MAX2(result.y0, s->scissor.miny);
   result.x1 = MIN2(result.x1, s->scissor.maxx);
   result.y1 = MIN2(result.y1, s->scissor.maxy);
   return result;
}
803
/* Upload quad vertices for every used layer into a freshly allocated slice
 * of the stream uploader.
 *
 * Also assigns a default full-framebuffer viewport to layers without a
 * valid one, and collapses the dirty area to "empty" when a clearing layer
 * fully covers it (so the caller can skip clear_render_target).
 */
static void
gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   struct vertex2f *vb;
   unsigned i;

   assert(c);

   /* Allocate new memory for vertices. */
   u_upload_alloc(c->pipe->stream_uploader, 0,
                  c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
                  4, /* alignment */
                  &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer.resource,
                  (void**)&vb);

   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         gen_rect_verts(vb, layer);
         vb += 20; /* 4 vertices x 5 vertex2f each, see gen_rect_verts() */

         if (!layer->viewport_valid) {
            layer->viewport.scale[0] = c->fb_state.width;
            layer->viewport.scale[1] = c->fb_state.height;
            layer->viewport.translate[0] = 0;
            layer->viewport.translate[1] = 0;
         }

         if (dirty && layer->clearing) {
            struct u_rect drawn = calc_drawn_area(s, layer);
            if (
             dirty->x0 >= drawn.x0 &&
             dirty->y0 >= drawn.y0 &&
             dirty->x1 <= drawn.x1 &&
             dirty->y1 <= drawn.y1) {

               // We clear the dirty area anyway, no need for clear_render_target
               dirty->x0 = dirty->y0 = MAX_DIRTY;
               dirty->x1 = dirty->y1 = MIN_DIRTY;
            }
         }
      }
   }

   u_upload_unmap(c->pipe->stream_uploader);
}
850
/* Draw every used layer as one quad, binding its blend state, viewport,
 * fragment shader and sampler views, and accumulate the drawn regions into
 * 'dirty' for the next frame. */
static void
draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         /* count the contiguous non-NULL sampler views (1..3) */
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
         /* layer 0 overwrites (blend_clear), later layers blend on top,
          * unless the layer brings its own blend state */
         void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;

         c->pipe->bind_blend_state(c->pipe, blend);
         c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_sampler_states(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                      num_sampler_views, layer->samplers);
         c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                    num_sampler_views, samplers);

         /* quads were packed in layer order by gen_vertex_data() */
         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(s, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}
887
888 static void
889 set_yuv_layer(struct vl_compositor_state *s, struct vl_compositor *c, unsigned layer,
890 struct pipe_video_buffer *buffer, struct u_rect *src_rect,
891 struct u_rect *dst_rect, bool y)
892 {
893 struct pipe_sampler_view **sampler_views;
894 unsigned i;
895
896 assert(s && c && buffer);
897
898 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
899
900 s->used_layers |= 1 << layer;
901 sampler_views = buffer->get_sampler_view_components(buffer);
902 for (i = 0; i < 3; ++i) {
903 s->layers[layer].samplers[i] = c->sampler_linear;
904 pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
905 }
906
907 calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
908 src_rect ? *src_rect : default_rect(&s->layers[layer]),
909 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
910
911 s->layers[layer].fs = (y) ? c->fs_weave_yuv.y : c->fs_weave_yuv.uv;
912 }
913
914 void
915 vl_compositor_reset_dirty_area(struct u_rect *dirty)
916 {
917 assert(dirty);
918
919 dirty->x0 = dirty->y0 = MIN_DIRTY;
920 dirty->x1 = dirty->y1 = MAX_DIRTY;
921 }
922
923 void
924 vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
925 {
926 assert(s);
927 assert(color);
928
929 s->clear_color = *color;
930 }
931
932 void
933 vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
934 {
935 assert(s);
936 assert(color);
937
938 *color = s->clear_color;
939 }
940
941 void
942 vl_compositor_clear_layers(struct vl_compositor_state *s)
943 {
944 unsigned i, j;
945
946 assert(s);
947
948 s->used_layers = 0;
949 for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
950 struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
951 s->layers[i].clearing = i ? false : true;
952 s->layers[i].blend = NULL;
953 s->layers[i].fs = NULL;
954 s->layers[i].viewport.scale[2] = 1;
955 s->layers[i].viewport.translate[2] = 0;
956 s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;
957
958 for ( j = 0; j < 3; j++)
959 pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
960 for ( j = 0; j < 4; ++j)
961 s->layers[i].colors[j] = v_one;
962 }
963 }
964
965 void
966 vl_compositor_cleanup(struct vl_compositor *c)
967 {
968 assert(c);
969
970 cleanup_buffers(c);
971 cleanup_shaders(c);
972 cleanup_pipe_state(c);
973 }
974
975 bool
976 vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
977 vl_csc_matrix const *matrix,
978 float luma_min, float luma_max)
979 {
980 struct pipe_transfer *buf_transfer;
981
982 assert(s);
983
984 float *ptr = pipe_buffer_map(s->pipe, s->csc_matrix,
985 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
986 &buf_transfer);
987
988 if (!ptr)
989 return false;
990
991 memcpy(ptr, matrix, sizeof(vl_csc_matrix));
992
993 ptr += sizeof(vl_csc_matrix)/sizeof(float);
994 ptr[0] = luma_min;
995 ptr[1] = luma_max;
996
997 pipe_buffer_unmap(s->pipe, buf_transfer);
998
999 return true;
1000 }
1001
1002 void
1003 vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
1004 {
1005 assert(s);
1006
1007 s->scissor_valid = dst_clip != NULL;
1008 if (dst_clip) {
1009 s->scissor.minx = dst_clip->x0;
1010 s->scissor.miny = dst_clip->y0;
1011 s->scissor.maxx = dst_clip->x1;
1012 s->scissor.maxy = dst_clip->y1;
1013 }
1014 }
1015
1016 void
1017 vl_compositor_set_layer_blend(struct vl_compositor_state *s,
1018 unsigned layer, void *blend,
1019 bool is_clearing)
1020 {
1021 assert(s && blend);
1022
1023 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1024
1025 s->layers[layer].clearing = is_clearing;
1026 s->layers[layer].blend = blend;
1027 }
1028
1029 void
1030 vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
1031 unsigned layer, struct u_rect *dst_area)
1032 {
1033 assert(s);
1034
1035 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1036
1037 s->layers[layer].viewport_valid = dst_area != NULL;
1038 if (dst_area) {
1039 s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
1040 s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
1041 s->layers[layer].viewport.translate[0] = dst_area->x0;
1042 s->layers[layer].viewport.translate[1] = dst_area->y0;
1043 }
1044 }
1045
1046 void
1047 vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
1048 struct vl_compositor *c,
1049 unsigned layer,
1050 struct pipe_video_buffer *buffer,
1051 struct u_rect *src_rect,
1052 struct u_rect *dst_rect,
1053 enum vl_compositor_deinterlace deinterlace)
1054 {
1055 struct pipe_sampler_view **sampler_views;
1056 unsigned i;
1057
1058 assert(s && c && buffer);
1059
1060 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1061
1062 s->used_layers |= 1 << layer;
1063 sampler_views = buffer->get_sampler_view_components(buffer);
1064 for (i = 0; i < 3; ++i) {
1065 s->layers[layer].samplers[i] = c->sampler_linear;
1066 pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
1067 }
1068
1069 calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
1070 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1071 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1072
1073 if (buffer->interlaced) {
1074 float half_a_line = 0.5f / s->layers[layer].zw.y;
1075 switch(deinterlace) {
1076 case VL_COMPOSITOR_WEAVE:
1077 s->layers[layer].fs = c->fs_weave_rgb;
1078 break;
1079
1080 case VL_COMPOSITOR_BOB_TOP:
1081 s->layers[layer].zw.x = 0.0f;
1082 s->layers[layer].src.tl.y += half_a_line;
1083 s->layers[layer].src.br.y += half_a_line;
1084 s->layers[layer].fs = c->fs_video_buffer;
1085 break;
1086
1087 case VL_COMPOSITOR_BOB_BOTTOM:
1088 s->layers[layer].zw.x = 1.0f;
1089 s->layers[layer].src.tl.y -= half_a_line;
1090 s->layers[layer].src.br.y -= half_a_line;
1091 s->layers[layer].fs = c->fs_video_buffer;
1092 break;
1093 }
1094
1095 } else
1096 s->layers[layer].fs = c->fs_video_buffer;
1097 }
1098
1099 void
1100 vl_compositor_set_palette_layer(struct vl_compositor_state *s,
1101 struct vl_compositor *c,
1102 unsigned layer,
1103 struct pipe_sampler_view *indexes,
1104 struct pipe_sampler_view *palette,
1105 struct u_rect *src_rect,
1106 struct u_rect *dst_rect,
1107 bool include_color_conversion)
1108 {
1109 assert(s && c && indexes && palette);
1110
1111 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1112
1113 s->used_layers |= 1 << layer;
1114
1115 s->layers[layer].fs = include_color_conversion ?
1116 c->fs_palette.yuv : c->fs_palette.rgb;
1117
1118 s->layers[layer].samplers[0] = c->sampler_linear;
1119 s->layers[layer].samplers[1] = c->sampler_nearest;
1120 s->layers[layer].samplers[2] = NULL;
1121 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
1122 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
1123 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1124 calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
1125 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1126 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1127 }
1128
1129 void
1130 vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
1131 struct vl_compositor *c,
1132 unsigned layer,
1133 struct pipe_sampler_view *rgba,
1134 struct u_rect *src_rect,
1135 struct u_rect *dst_rect,
1136 struct vertex4f *colors)
1137 {
1138 unsigned i;
1139
1140 assert(s && c && rgba);
1141
1142 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1143
1144 s->used_layers |= 1 << layer;
1145 s->layers[layer].fs = c->fs_rgba;
1146 s->layers[layer].samplers[0] = c->sampler_linear;
1147 s->layers[layer].samplers[1] = NULL;
1148 s->layers[layer].samplers[2] = NULL;
1149 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
1150 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
1151 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1152 calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
1153 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1154 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1155
1156 if (colors)
1157 for (i = 0; i < 4; ++i)
1158 s->layers[layer].colors[i] = colors[i];
1159 }
1160
1161 void
1162 vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
1163 unsigned layer,
1164 enum vl_compositor_rotation rotate)
1165 {
1166 assert(s);
1167 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1168 s->layers[layer].rotate = rotate;
1169 }
1170
1171 void
1172 vl_compositor_render(struct vl_compositor_state *s,
1173 struct vl_compositor *c,
1174 struct pipe_surface *dst_surface,
1175 struct u_rect *dirty_area,
1176 bool clear_dirty)
1177 {
1178 assert(c);
1179 assert(dst_surface);
1180
1181 c->fb_state.width = dst_surface->width;
1182 c->fb_state.height = dst_surface->height;
1183 c->fb_state.cbufs[0] = dst_surface;
1184
1185 if (!s->scissor_valid) {
1186 s->scissor.minx = 0;
1187 s->scissor.miny = 0;
1188 s->scissor.maxx = dst_surface->width;
1189 s->scissor.maxy = dst_surface->height;
1190 }
1191 c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);
1192
1193 gen_vertex_data(c, s, dirty_area);
1194
1195 if (clear_dirty && dirty_area &&
1196 (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {
1197
1198 c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
1199 0, 0, dst_surface->width, dst_surface->height, false);
1200 dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
1201 dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
1202 }
1203
1204 c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
1205 c->pipe->bind_vs_state(c->pipe, c->vs);
1206 c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
1207 c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
1208 pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
1209 c->pipe->bind_rasterizer_state(c->pipe, c->rast);
1210
1211 draw_layers(c, s, dirty_area);
1212 }
1213
1214 void
1215 vl_compositor_yuv_deint(struct vl_compositor_state *s,
1216 struct vl_compositor *c,
1217 struct pipe_video_buffer *src,
1218 struct pipe_video_buffer *dst)
1219 {
1220 struct pipe_surface **dst_surfaces;
1221 struct u_rect dst_rect;
1222
1223 dst_surfaces = dst->get_surfaces(dst);
1224 vl_compositor_clear_layers(s);
1225
1226 dst_rect.x0 = 0;
1227 dst_rect.x1 = src->width;
1228 dst_rect.y0 = 0;
1229 dst_rect.y1 = src->height;
1230
1231 set_yuv_layer(s, c, 0, src, NULL, NULL, true);
1232 vl_compositor_set_layer_dst_area(s, 0, &dst_rect);
1233 vl_compositor_render(s, c, dst_surfaces[0], NULL, false);
1234
1235 dst_rect.x1 /= 2;
1236 dst_rect.y1 /= 2;
1237
1238 set_yuv_layer(s, c, 0, src, NULL, NULL, false);
1239 vl_compositor_set_layer_dst_area(s, 0, &dst_rect);
1240 vl_compositor_render(s, c, dst_surfaces[1], NULL, false);
1241
1242 s->pipe->flush(s->pipe, NULL, 0);
1243 }
1244
1245 bool
1246 vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
1247 {
1248 assert(c);
1249
1250 memset(c, 0, sizeof(*c));
1251
1252 c->pipe = pipe;
1253
1254 if (!init_pipe_state(c)) {
1255 return false;
1256 }
1257
1258 if (!init_shaders(c)) {
1259 cleanup_pipe_state(c);
1260 return false;
1261 }
1262
1263 if (!init_buffers(c)) {
1264 cleanup_shaders(c);
1265 cleanup_pipe_state(c);
1266 return false;
1267 }
1268
1269 return true;
1270 }
1271
1272 bool
1273 vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
1274 {
1275 vl_csc_matrix csc_matrix;
1276
1277 assert(s);
1278
1279 memset(s, 0, sizeof(*s));
1280
1281 s->pipe = pipe;
1282
1283 s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
1284 s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;
1285
1286 /*
1287 * Create our fragment shader's constant buffer
1288 * Const buffer contains the color conversion matrix and bias vectors
1289 */
1290 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
1291 s->csc_matrix = pipe_buffer_create
1292 (
1293 pipe->screen,
1294 PIPE_BIND_CONSTANT_BUFFER,
1295 PIPE_USAGE_DEFAULT,
1296 sizeof(csc_matrix) + 2*sizeof(float)
1297 );
1298
1299 if (!s->csc_matrix)
1300 return false;
1301
1302 vl_compositor_clear_layers(s);
1303
1304 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
1305 if (!vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix, 1.0f, 0.0f))
1306 return false;
1307
1308 return true;
1309 }
1310
1311 void
1312 vl_compositor_cleanup_state(struct vl_compositor_state *s)
1313 {
1314 assert(s);
1315
1316 vl_compositor_clear_layers(s);
1317 pipe_resource_reference(&s->csc_matrix, NULL);
1318 }