vl/compositor: move weave shader out from rgb weaving
[mesa.git] src/gallium/auxiliary/vl/vl_compositor.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include "pipe/p_compiler.h"
31 #include "pipe/p_context.h"
32
33 #include "util/u_memory.h"
34 #include "util/u_draw.h"
35 #include "util/u_surface.h"
36 #include "util/u_upload_mgr.h"
37
38 #include "tgsi/tgsi_ureg.h"
39
40 #include "vl_csc.h"
41 #include "vl_types.h"
42 #include "vl_compositor.h"
43
44 #define MIN_DIRTY (0)
45 #define MAX_DIRTY (1 << 15)
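      /* The dirty area uses an inverted-rectangle convention: a freshly reset
       * area spans [MIN_DIRTY, MAX_DIRTY] in both directions (everything is
       * dirty), while "nothing dirty" is stored as x0 = y0 = MAX_DIRTY and
       * x1 = y1 = MIN_DIRTY, so a later MIN2/MAX2 union snaps straight to the
       * next drawn rectangle.
       */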
46
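      /* These are TGSI semantic indices rather than output slots: POSITION,
       * COLOR and GENERIC each have their own index space, which is why
       * VS_O_VPOS, VS_O_COLOR and VS_O_VTEX can all be 0 while VS_O_VTOP and
       * VS_O_VBOTTOM continue the GENERIC indices as 1 and 2.
       */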
47 enum VS_OUTPUT
48 {
49 VS_O_VPOS = 0,
50 VS_O_COLOR = 0,
51 VS_O_VTEX = 0,
52 VS_O_VTOP,
53 VS_O_VBOTTOM,
54 };
55
56 static void *
57 create_vert_shader(struct vl_compositor *c)
58 {
59 struct ureg_program *shader;
60 struct ureg_src vpos, vtex, color;
61 struct ureg_dst tmp;
62 struct ureg_dst o_vpos, o_vtex, o_color;
63 struct ureg_dst o_vtop, o_vbottom;
64
65 shader = ureg_create(PIPE_SHADER_VERTEX);
66 if (!shader)
 67       return NULL;
68
69 vpos = ureg_DECL_vs_input(shader, 0);
70 vtex = ureg_DECL_vs_input(shader, 1);
71 color = ureg_DECL_vs_input(shader, 2);
72 tmp = ureg_DECL_temporary(shader);
73 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
74 o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
75 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
76 o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
77 o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);
78
79 /*
80 * o_vpos = vpos
81 * o_vtex = vtex
82 * o_color = color
83 */
84 ureg_MOV(shader, o_vpos, vpos);
85 ureg_MOV(shader, o_vtex, vtex);
86 ureg_MOV(shader, o_color, color);
87
88 /*
89 * tmp.x = vtex.w / 2
90 * tmp.y = vtex.w / 4
91 *
92 * o_vtop.x = vtex.x
93 * o_vtop.y = vtex.y * tmp.x + 0.25f
94 * o_vtop.z = vtex.y * tmp.y + 0.25f
95 * o_vtop.w = 1 / tmp.x
96 *
97 * o_vbottom.x = vtex.x
98 * o_vbottom.y = vtex.y * tmp.x - 0.25f
99 * o_vbottom.z = vtex.y * tmp.y - 0.25f
100 * o_vbottom.w = 1 / tmp.y
101 */
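      /* vtex.zw is the per-layer auxiliary data written by gen_rect_verts():
       * .z is the field selector used for bob deinterlacing, .w the video
       * surface height.  tmp.x and tmp.y therefore hold the rows per field of
       * the luma and (presumably 4:2:0 subsampled) chroma planes, so
       * o_vtop/o_vbottom carry per-field row coordinates offset by a quarter
       * line towards the field sample positions, with .w keeping the
       * reciprocal that the weave shader uses to renormalize them.
       */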
102 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
103 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
104 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
105 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));
106
107 ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
108 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
109 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
110 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
111 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
112 ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
113 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
114
115 ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
116 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
117 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
118 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
119 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
120 ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
121 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
122
123 ureg_END(shader);
124
125 return ureg_create_shader_and_destroy(shader, c->pipe);
126 }
127
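      /* Emits the weave-deinterlacing part of a fragment shader.  The two
       * fields are stored as the layers of a 2D array texture, so for each
       * output pixel the shader fetches the nearest row of every plane from
       * both fields (t_tc[i].w selects the layer) and blends them with a
       * weight derived from the pixel's distance to the field sample
       * positions.  The reconstructed YCbCr value is written to the
       * destination register; color space conversion is left to the caller.
       */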
128 static void
129 create_frag_shader_weave(struct ureg_program *shader, struct ureg_dst fragment)
130 {
131 struct ureg_src i_tc[2];
132 struct ureg_src sampler[3];
133 struct ureg_dst t_tc[2];
134 struct ureg_dst t_texel[2];
135 unsigned i, j;
136
137 i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
138 i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);
139
140 for (i = 0; i < 3; ++i)
141 sampler[i] = ureg_DECL_sampler(shader, i);
142
143 for (i = 0; i < 2; ++i) {
144 t_tc[i] = ureg_DECL_temporary(shader);
145 t_texel[i] = ureg_DECL_temporary(shader);
146 }
147
148 /* calculate the texture offsets
149 * t_tc.x = i_tc.x
150 * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
151 */
152 for (i = 0; i < 2; ++i) {
153 ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
154 ureg_SUB(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
155 i_tc[i], ureg_imm1f(shader, 0.5f));
156 ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
157 ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
158 ureg_imm1f(shader, i ? 1.0f : 0.0f));
159 ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
160 ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
161 ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
162 ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
163 ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
164 ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
165 }
166
167 /* fetch the texels
168 * texel[0..1].x = tex(t_tc[0..1][0])
169 * texel[0..1].y = tex(t_tc[0..1][1])
170 * texel[0..1].z = tex(t_tc[0..1][2])
171 */
172 for (i = 0; i < 2; ++i)
173 for (j = 0; j < 3; ++j) {
174 struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
175 TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
176
177 ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
178 TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
179 }
180
181 /* calculate linear interpolation factor
182 * factor = |round(i_tc.y) - i_tc.y| * 2
183 */
184 ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
185 ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
186 ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
187 ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
188 ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
189 ureg_LRP(shader, fragment, ureg_swizzle(ureg_src(t_tc[0]),
190 TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
191 ureg_src(t_texel[0]), ureg_src(t_texel[1]));
192
193 for (i = 0; i < 2; ++i) {
194 ureg_release_temporary(shader, t_texel[i]);
195 ureg_release_temporary(shader, t_tc[i]);
196 }
197 }
198
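      /* Appends the color space conversion: constant slots 0-2 hold the rows
       * of the CSC matrix (fragment.rgb = csc[i] . texel, with texel.w forced
       * to 1.0) and slot 3 the luma-key bounds from
       * vl_compositor_set_csc_matrix().  fragment.w ends up as 0.0 only when
       * the keyed component (texel.z here) lies inside (luma_min, luma_max],
       * so the default bounds of (1.0, 0.0) effectively disable luma keying.
       */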
199 static void
200 create_frag_shader_csc(struct ureg_program *shader, struct ureg_dst texel,
201 struct ureg_dst fragment)
202 {
203 struct ureg_src csc[3];
204 struct ureg_src lumakey;
205 struct ureg_dst temp[2];
206 unsigned i;
207
208 for (i = 0; i < 3; ++i)
209 csc[i] = ureg_DECL_constant(shader, i);
210
211 lumakey = ureg_DECL_constant(shader, 3);
212
213 for (i = 0; i < 2; ++i)
214 temp[i] = ureg_DECL_temporary(shader);
215
216 ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W),
217 ureg_imm1f(shader, 1.0f));
218
219 for (i = 0; i < 3; ++i)
220 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i],
221 ureg_src(texel));
222
223 ureg_MOV(shader, ureg_writemask(temp[0], TGSI_WRITEMASK_W),
224 ureg_scalar(ureg_src(texel), TGSI_SWIZZLE_Z));
225 ureg_SLE(shader, ureg_writemask(temp[1],TGSI_WRITEMASK_W),
226 ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_X));
227 ureg_SGT(shader, ureg_writemask(temp[0],TGSI_WRITEMASK_W),
228 ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_Y));
229 ureg_MAX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
230 ureg_src(temp[0]), ureg_src(temp[1]));
231
232 for (i = 0; i < 2; ++i)
233 ureg_release_temporary(shader, temp[i]);
234 }
235
236 static void *
237 create_frag_shader_video_buffer(struct vl_compositor *c)
238 {
239 struct ureg_program *shader;
240 struct ureg_src tc;
241 struct ureg_src sampler[3];
242 struct ureg_dst texel;
243 struct ureg_dst fragment;
244 unsigned i;
245
246 shader = ureg_create(PIPE_SHADER_FRAGMENT);
247 if (!shader)
 248       return NULL;
249
250 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
251 for (i = 0; i < 3; ++i)
252 sampler[i] = ureg_DECL_sampler(shader, i);
253
254 texel = ureg_DECL_temporary(shader);
255 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
256
257 /*
258 * texel.xyz = tex(tc, sampler[i])
259 * fragment = csc * texel
260 */
261 for (i = 0; i < 3; ++i)
262 ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);
263
264 create_frag_shader_csc(shader, texel, fragment);
265
266 ureg_release_temporary(shader, texel);
267 ureg_END(shader);
268
269 return ureg_create_shader_and_destroy(shader, c->pipe);
270 }
271
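      /* Complete fragment shader for weave deinterlacing straight to RGB:
       * weave the two fields into a YCbCr texel, then run it through the CSC
       * stage above.
       */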
272 static void *
273 create_frag_shader_weave_rgb(struct vl_compositor *c)
274 {
275 struct ureg_program *shader;
276 struct ureg_dst texel, fragment;
277
278 shader = ureg_create(PIPE_SHADER_FRAGMENT);
279 if (!shader)
 280       return NULL;
281
282 texel = ureg_DECL_temporary(shader);
283 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
284
285 create_frag_shader_weave(shader, texel);
286 create_frag_shader_csc(shader, texel, fragment);
287
288 ureg_release_temporary(shader, texel);
289
290 ureg_END(shader);
291
292 return ureg_create_shader_and_destroy(shader, c->pipe);
293 }
294
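      /* Fragment shader for palettized (sub-picture) layers: sampler 0 is the
       * index texture, sampler 1 the 1D palette.  With include_cc the palette
       * entries are treated as YCbCr and converted with the CSC matrix,
       * otherwise they are used as RGB directly; alpha is taken from the
       * index texture fetch.
       */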
295 static void *
296 create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
297 {
298 struct ureg_program *shader;
299 struct ureg_src csc[3];
300 struct ureg_src tc;
301 struct ureg_src sampler;
302 struct ureg_src palette;
303 struct ureg_dst texel;
304 struct ureg_dst fragment;
305 unsigned i;
306
307 shader = ureg_create(PIPE_SHADER_FRAGMENT);
308 if (!shader)
 309       return NULL;
310
311 for (i = 0; include_cc && i < 3; ++i)
312 csc[i] = ureg_DECL_constant(shader, i);
313
314 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
315 sampler = ureg_DECL_sampler(shader, 0);
316 palette = ureg_DECL_sampler(shader, 1);
317
318 texel = ureg_DECL_temporary(shader);
319 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
320
321 /*
322 * texel = tex(tc, sampler)
323 * fragment.xyz = tex(texel, palette) * csc
324 * fragment.a = texel.a
325 */
326 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
327 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));
328
329 if (include_cc) {
330 ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
331 for (i = 0; i < 3; ++i)
332 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
333 } else {
334 ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
335 TGSI_TEXTURE_1D, ureg_src(texel), palette);
336 }
337
338 ureg_release_temporary(shader, texel);
339 ureg_END(shader);
340
341 return ureg_create_shader_and_destroy(shader, c->pipe);
342 }
343
344 static void *
345 create_frag_shader_rgba(struct vl_compositor *c)
346 {
347 struct ureg_program *shader;
348 struct ureg_src tc, color, sampler;
349 struct ureg_dst texel, fragment;
350
351 shader = ureg_create(PIPE_SHADER_FRAGMENT);
352 if (!shader)
 353       return NULL;
354
355 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
356 color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
357 sampler = ureg_DECL_sampler(shader, 0);
358 texel = ureg_DECL_temporary(shader);
359 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
360
 361    /*
 362     * fragment = tex(tc, sampler) * color
 363     */
364 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
365 ureg_MUL(shader, fragment, ureg_src(texel), color);
366 ureg_END(shader);
367
368 return ureg_create_shader_and_destroy(shader, c->pipe);
369 }
370
371 static bool
372 init_shaders(struct vl_compositor *c)
373 {
374 assert(c);
375
376 c->vs = create_vert_shader(c);
377 if (!c->vs) {
378 debug_printf("Unable to create vertex shader.\n");
379 return false;
380 }
381
382 c->fs_video_buffer = create_frag_shader_video_buffer(c);
383 if (!c->fs_video_buffer) {
384 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
385 return false;
386 }
387
388 c->fs_weave_rgb = create_frag_shader_weave_rgb(c);
389 if (!c->fs_weave_rgb) {
390 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
391 return false;
392 }
393
394 c->fs_palette.yuv = create_frag_shader_palette(c, true);
395 if (!c->fs_palette.yuv) {
396 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
397 return false;
398 }
399
400 c->fs_palette.rgb = create_frag_shader_palette(c, false);
401 if (!c->fs_palette.rgb) {
402 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
403 return false;
404 }
405
406 c->fs_rgba = create_frag_shader_rgba(c);
407 if (!c->fs_rgba) {
408 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
409 return false;
410 }
411
412 return true;
413 }
414
415 static void cleanup_shaders(struct vl_compositor *c)
416 {
417 assert(c);
418
419 c->pipe->delete_vs_state(c->pipe, c->vs);
420 c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
421 c->pipe->delete_fs_state(c->pipe, c->fs_weave_rgb);
422 c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
423 c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
424 c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
425 }
426
427 static bool
428 init_pipe_state(struct vl_compositor *c)
429 {
430 struct pipe_rasterizer_state rast;
431 struct pipe_sampler_state sampler;
432 struct pipe_blend_state blend;
433 struct pipe_depth_stencil_alpha_state dsa;
434 unsigned i;
435
436 assert(c);
437
438 c->fb_state.nr_cbufs = 1;
439 c->fb_state.zsbuf = NULL;
440
441 memset(&sampler, 0, sizeof(sampler));
442 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
443 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
444 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
445 sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
446 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
447 sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
448 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
449 sampler.compare_func = PIPE_FUNC_ALWAYS;
450 sampler.normalized_coords = 1;
451
452 c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);
453
454 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
455 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
456 c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);
457
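      /* Two blend states: blend_clear simply overwrites the destination and
       * is used for the bottom-most layer, while blend_add composites every
       * following layer on top using its source alpha (see draw_layers()).
       */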
458 memset(&blend, 0, sizeof blend);
459 blend.independent_blend_enable = 0;
460 blend.rt[0].blend_enable = 0;
461 blend.logicop_enable = 0;
462 blend.logicop_func = PIPE_LOGICOP_CLEAR;
463 blend.rt[0].colormask = PIPE_MASK_RGBA;
464 blend.dither = 0;
465 c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);
466
467 blend.rt[0].blend_enable = 1;
468 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
469 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
470 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
471 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
472 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
473 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
474 c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);
475
476 memset(&rast, 0, sizeof rast);
477 rast.flatshade = 0;
478 rast.front_ccw = 1;
479 rast.cull_face = PIPE_FACE_NONE;
480 rast.fill_back = PIPE_POLYGON_MODE_FILL;
481 rast.fill_front = PIPE_POLYGON_MODE_FILL;
482 rast.scissor = 1;
483 rast.line_width = 1;
484 rast.point_size_per_vertex = 1;
485 rast.offset_units = 1;
486 rast.offset_scale = 1;
487 rast.half_pixel_center = 1;
488 rast.bottom_edge_rule = 1;
489 rast.depth_clip = 1;
490
491 c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);
492
493 memset(&dsa, 0, sizeof dsa);
494 dsa.depth.enabled = 0;
495 dsa.depth.writemask = 0;
496 dsa.depth.func = PIPE_FUNC_ALWAYS;
497 for (i = 0; i < 2; ++i) {
498 dsa.stencil[i].enabled = 0;
499 dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
500 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
501 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
502 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
503 dsa.stencil[i].valuemask = 0;
504 dsa.stencil[i].writemask = 0;
505 }
506 dsa.alpha.enabled = 0;
507 dsa.alpha.func = PIPE_FUNC_ALWAYS;
508 dsa.alpha.ref_value = 0;
509 c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
510 c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);
511
512 return true;
513 }
514
515 static void cleanup_pipe_state(struct vl_compositor *c)
516 {
517 assert(c);
518
519 /* Asserted in softpipe_delete_fs_state() for some reason */
520 c->pipe->bind_vs_state(c->pipe, NULL);
521 c->pipe->bind_fs_state(c->pipe, NULL);
522
523 c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
524 c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
525 c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
526 c->pipe->delete_blend_state(c->pipe, c->blend_clear);
527 c->pipe->delete_blend_state(c->pipe, c->blend_add);
528 c->pipe->delete_rasterizer_state(c->pipe, c->rast);
529 }
530
531 static bool
532 init_buffers(struct vl_compositor *c)
533 {
534 struct pipe_vertex_element vertex_elems[3];
535
536 assert(c);
537
538 /*
539 * Create our vertex buffer and vertex buffer elements
540 */
541 c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
542 c->vertex_buf.buffer_offset = 0;
543 c->vertex_buf.buffer = NULL;
544
545 vertex_elems[0].src_offset = 0;
546 vertex_elems[0].instance_divisor = 0;
547 vertex_elems[0].vertex_buffer_index = 0;
548 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
549 vertex_elems[1].src_offset = sizeof(struct vertex2f);
550 vertex_elems[1].instance_divisor = 0;
551 vertex_elems[1].vertex_buffer_index = 0;
552 vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
553 vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
554 vertex_elems[2].instance_divisor = 0;
555 vertex_elems[2].vertex_buffer_index = 0;
556 vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
557 c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);
558
559 return true;
560 }
561
562 static void
563 cleanup_buffers(struct vl_compositor *c)
564 {
565 assert(c);
566
567 c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
568 pipe_resource_reference(&c->vertex_buf.buffer, NULL);
569 }
570
571 static inline struct u_rect
572 default_rect(struct vl_compositor_layer *layer)
573 {
574 struct pipe_resource *res = layer->sampler_views[0]->texture;
575 struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
576 return rect;
577 }
578
579 static inline struct vertex2f
580 calc_topleft(struct vertex2f size, struct u_rect rect)
581 {
582 struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
583 return res;
584 }
585
586 static inline struct vertex2f
587 calc_bottomright(struct vertex2f size, struct u_rect rect)
588 {
589 struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
590 return res;
591 }
592
593 static inline void
594 calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
595 struct u_rect src, struct u_rect dst)
596 {
597 struct vertex2f size = { width, height };
598
599 layer->src.tl = calc_topleft(size, src);
600 layer->src.br = calc_bottomright(size, src);
601 layer->dst.tl = calc_topleft(size, dst);
602 layer->dst.br = calc_bottomright(size, dst);
603 layer->zw.x = 0.0f;
604 layer->zw.y = size.y;
605 }
606
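      /* Each vertex is emitted as five vertex2f slots, matching the layout
       * set up in init_buffers(): position, source texcoord, the zw auxiliary
       * pair (field selector / surface height) and the per-corner color split
       * across two slots; four such vertices form one quad per layer.
       */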
607 static void
608 gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
609 {
610 struct vertex2f tl, tr, br, bl;
611
612 assert(vb && layer);
613
614 switch (layer->rotate) {
615 default:
616 case VL_COMPOSITOR_ROTATE_0:
617 tl = layer->dst.tl;
618 tr.x = layer->dst.br.x;
619 tr.y = layer->dst.tl.y;
620 br = layer->dst.br;
621 bl.x = layer->dst.tl.x;
622 bl.y = layer->dst.br.y;
623 break;
624 case VL_COMPOSITOR_ROTATE_90:
625 tl.x = layer->dst.br.x;
626 tl.y = layer->dst.tl.y;
627 tr = layer->dst.br;
628 br.x = layer->dst.tl.x;
629 br.y = layer->dst.br.y;
630 bl = layer->dst.tl;
631 break;
632 case VL_COMPOSITOR_ROTATE_180:
633 tl = layer->dst.br;
634 tr.x = layer->dst.tl.x;
635 tr.y = layer->dst.br.y;
636 br = layer->dst.tl;
637 bl.x = layer->dst.br.x;
638 bl.y = layer->dst.tl.y;
639 break;
640 case VL_COMPOSITOR_ROTATE_270:
641 tl.x = layer->dst.tl.x;
642 tl.y = layer->dst.br.y;
643 tr = layer->dst.tl;
644 br.x = layer->dst.br.x;
645 br.y = layer->dst.tl.y;
646 bl = layer->dst.br;
647 break;
648 }
649
650 vb[ 0].x = tl.x;
651 vb[ 0].y = tl.y;
652 vb[ 1].x = layer->src.tl.x;
653 vb[ 1].y = layer->src.tl.y;
654 vb[ 2] = layer->zw;
655 vb[ 3].x = layer->colors[0].x;
656 vb[ 3].y = layer->colors[0].y;
657 vb[ 4].x = layer->colors[0].z;
658 vb[ 4].y = layer->colors[0].w;
659
660 vb[ 5].x = tr.x;
661 vb[ 5].y = tr.y;
662 vb[ 6].x = layer->src.br.x;
663 vb[ 6].y = layer->src.tl.y;
664 vb[ 7] = layer->zw;
665 vb[ 8].x = layer->colors[1].x;
666 vb[ 8].y = layer->colors[1].y;
667 vb[ 9].x = layer->colors[1].z;
668 vb[ 9].y = layer->colors[1].w;
669
670 vb[10].x = br.x;
671 vb[10].y = br.y;
672 vb[11].x = layer->src.br.x;
673 vb[11].y = layer->src.br.y;
674 vb[12] = layer->zw;
675 vb[13].x = layer->colors[2].x;
676 vb[13].y = layer->colors[2].y;
677 vb[14].x = layer->colors[2].z;
678 vb[14].y = layer->colors[2].w;
679
680 vb[15].x = bl.x;
681 vb[15].y = bl.y;
682 vb[16].x = layer->src.tl.x;
683 vb[16].y = layer->src.br.y;
684 vb[17] = layer->zw;
685 vb[18].x = layer->colors[3].x;
686 vb[18].y = layer->colors[3].y;
687 vb[19].x = layer->colors[3].z;
688 vb[19].y = layer->colors[3].w;
689 }
690
691 static inline struct u_rect
692 calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
693 {
694 struct vertex2f tl, br;
695 struct u_rect result;
696
697 assert(s && layer);
698
699 // rotate
700 switch (layer->rotate) {
701 default:
702 case VL_COMPOSITOR_ROTATE_0:
703 tl = layer->dst.tl;
704 br = layer->dst.br;
705 break;
706 case VL_COMPOSITOR_ROTATE_90:
707 tl.x = layer->dst.br.x;
708 tl.y = layer->dst.tl.y;
709 br.x = layer->dst.tl.x;
710 br.y = layer->dst.br.y;
711 break;
712 case VL_COMPOSITOR_ROTATE_180:
713 tl = layer->dst.br;
714 br = layer->dst.tl;
715 break;
716 case VL_COMPOSITOR_ROTATE_270:
717 tl.x = layer->dst.tl.x;
718 tl.y = layer->dst.br.y;
719 br.x = layer->dst.br.x;
720 br.y = layer->dst.tl.y;
721 break;
722 }
723
724 // scale
725 result.x0 = tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
726 result.y0 = tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
727 result.x1 = br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
728 result.y1 = br.y * layer->viewport.scale[1] + layer->viewport.translate[1];
729
730 // and clip
731 result.x0 = MAX2(result.x0, s->scissor.minx);
732 result.y0 = MAX2(result.y0, s->scissor.miny);
733 result.x1 = MIN2(result.x1, s->scissor.maxx);
734 result.y1 = MIN2(result.y1, s->scissor.maxy);
735 return result;
736 }
737
738 static void
739 gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
740 {
741 struct vertex2f *vb;
742 unsigned i;
743
744 assert(c);
745
746 /* Allocate new memory for vertices. */
747 u_upload_alloc(c->upload, 0,
748 c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
749 4, /* alignment */
750 &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
751 (void**)&vb);
752
753 for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
754 if (s->used_layers & (1 << i)) {
755 struct vl_compositor_layer *layer = &s->layers[i];
756 gen_rect_verts(vb, layer);
757 vb += 20;
758
759 if (!layer->viewport_valid) {
760 layer->viewport.scale[0] = c->fb_state.width;
761 layer->viewport.scale[1] = c->fb_state.height;
762 layer->viewport.translate[0] = 0;
763 layer->viewport.translate[1] = 0;
764 }
765
766 if (dirty && layer->clearing) {
767 struct u_rect drawn = calc_drawn_area(s, layer);
768 if (
769 dirty->x0 >= drawn.x0 &&
770 dirty->y0 >= drawn.y0 &&
771 dirty->x1 <= drawn.x1 &&
772 dirty->y1 <= drawn.y1) {
773
774 // We clear the dirty area anyway, no need for clear_render_target
775 dirty->x0 = dirty->y0 = MAX_DIRTY;
776 dirty->x1 = dirty->y1 = MIN_DIRTY;
777 }
778 }
779 }
780 }
781
782 u_upload_unmap(c->upload);
783 }
784
785 static void
786 draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
787 {
788 unsigned vb_index, i;
789
790 assert(c);
791
792 for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
793 if (s->used_layers & (1 << i)) {
794 struct vl_compositor_layer *layer = &s->layers[i];
795 struct pipe_sampler_view **samplers = &layer->sampler_views[0];
796 unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
797 void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;
798
799 c->pipe->bind_blend_state(c->pipe, blend);
800 c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
801 c->pipe->bind_fs_state(c->pipe, layer->fs);
802 c->pipe->bind_sampler_states(c->pipe, PIPE_SHADER_FRAGMENT, 0,
803 num_sampler_views, layer->samplers);
804 c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
805 num_sampler_views, samplers);
806
807 util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
808 vb_index++;
809
810 if (dirty) {
811 // Remember the currently drawn area as dirty for the next draw command
812 struct u_rect drawn = calc_drawn_area(s, layer);
813 dirty->x0 = MIN2(drawn.x0, dirty->x0);
814 dirty->y0 = MIN2(drawn.y0, dirty->y0);
815 dirty->x1 = MAX2(drawn.x1, dirty->x1);
816 dirty->y1 = MAX2(drawn.y1, dirty->y1);
817 }
818 }
819 }
820 }
821
822 void
823 vl_compositor_reset_dirty_area(struct u_rect *dirty)
824 {
825 assert(dirty);
826
827 dirty->x0 = dirty->y0 = MIN_DIRTY;
828 dirty->x1 = dirty->y1 = MAX_DIRTY;
829 }
830
831 void
832 vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
833 {
834 assert(s);
835 assert(color);
836
837 s->clear_color = *color;
838 }
839
840 void
841 vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
842 {
843 assert(s);
844 assert(color);
845
846 *color = s->clear_color;
847 }
848
849 void
850 vl_compositor_clear_layers(struct vl_compositor_state *s)
851 {
852 unsigned i, j;
853
854 assert(s);
855
856 s->used_layers = 0;
857 for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
858 struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
859 s->layers[i].clearing = i ? false : true;
860 s->layers[i].blend = NULL;
861 s->layers[i].fs = NULL;
862 s->layers[i].viewport.scale[2] = 1;
863 s->layers[i].viewport.translate[2] = 0;
864 s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;
865
866 for ( j = 0; j < 3; j++)
867 pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
868 for ( j = 0; j < 4; ++j)
869 s->layers[i].colors[j] = v_one;
870 }
871 }
872
873 void
874 vl_compositor_cleanup(struct vl_compositor *c)
875 {
876 assert(c);
877
878 u_upload_destroy(c->upload);
879 cleanup_buffers(c);
880 cleanup_shaders(c);
881 cleanup_pipe_state(c);
882 }
883
884 void
885 vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
886 vl_csc_matrix const *matrix,
887 float luma_min, float luma_max)
888 {
889 struct pipe_transfer *buf_transfer;
890
891 assert(s);
892
893 float *ptr = pipe_buffer_map(s->pipe, s->csc_matrix,
894 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
895 &buf_transfer);
896
897 memcpy(ptr, matrix, sizeof(vl_csc_matrix));
898
899 ptr += sizeof(vl_csc_matrix)/sizeof(float);
900 ptr[0] = luma_min;
901 ptr[1] = luma_max;
902
903 pipe_buffer_unmap(s->pipe, buf_transfer);
904 }
905
906 void
907 vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
908 {
909 assert(s);
910
911 s->scissor_valid = dst_clip != NULL;
912 if (dst_clip) {
913 s->scissor.minx = dst_clip->x0;
914 s->scissor.miny = dst_clip->y0;
915 s->scissor.maxx = dst_clip->x1;
916 s->scissor.maxy = dst_clip->y1;
917 }
918 }
919
920 void
921 vl_compositor_set_layer_blend(struct vl_compositor_state *s,
922 unsigned layer, void *blend,
923 bool is_clearing)
924 {
925 assert(s && blend);
926
927 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
928
929 s->layers[layer].clearing = is_clearing;
930 s->layers[layer].blend = blend;
931 }
932
933 void
934 vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
935 unsigned layer, struct u_rect *dst_area)
936 {
937 assert(s);
938
939 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
940
941 s->layers[layer].viewport_valid = dst_area != NULL;
942 if (dst_area) {
943 s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
944 s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
945 s->layers[layer].viewport.translate[0] = dst_area->x0;
946 s->layers[layer].viewport.translate[1] = dst_area->y0;
947 }
948 }
949
950 void
951 vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
952 struct vl_compositor *c,
953 unsigned layer,
954 struct pipe_video_buffer *buffer,
955 struct u_rect *src_rect,
956 struct u_rect *dst_rect,
957 enum vl_compositor_deinterlace deinterlace)
958 {
959 struct pipe_sampler_view **sampler_views;
960 unsigned i;
961
962 assert(s && c && buffer);
963
964 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
965
966 s->used_layers |= 1 << layer;
967 sampler_views = buffer->get_sampler_view_components(buffer);
968 for (i = 0; i < 3; ++i) {
969 s->layers[layer].samplers[i] = c->sampler_linear;
970 pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
971 }
972
973 calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
974 src_rect ? *src_rect : default_rect(&s->layers[layer]),
975 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
976
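      /* For interlaced buffers, weave uses the dedicated weave shader, while
       * the bob modes sample a single field: zw.x selects that field's array
       * layer and the source rectangle is shifted by half a line so the
       * chosen field stays vertically centered on the original frame.
       */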
977 if (buffer->interlaced) {
978 float half_a_line = 0.5f / s->layers[layer].zw.y;
979 switch(deinterlace) {
980 case VL_COMPOSITOR_WEAVE:
981 s->layers[layer].fs = c->fs_weave_rgb;
982 break;
983
984 case VL_COMPOSITOR_BOB_TOP:
985 s->layers[layer].zw.x = 0.0f;
986 s->layers[layer].src.tl.y += half_a_line;
987 s->layers[layer].src.br.y += half_a_line;
988 s->layers[layer].fs = c->fs_video_buffer;
989 break;
990
991 case VL_COMPOSITOR_BOB_BOTTOM:
992 s->layers[layer].zw.x = 1.0f;
993 s->layers[layer].src.tl.y -= half_a_line;
994 s->layers[layer].src.br.y -= half_a_line;
995 s->layers[layer].fs = c->fs_video_buffer;
996 break;
997 }
998
999 } else
1000 s->layers[layer].fs = c->fs_video_buffer;
1001 }
1002
1003 void
1004 vl_compositor_set_palette_layer(struct vl_compositor_state *s,
1005 struct vl_compositor *c,
1006 unsigned layer,
1007 struct pipe_sampler_view *indexes,
1008 struct pipe_sampler_view *palette,
1009 struct u_rect *src_rect,
1010 struct u_rect *dst_rect,
1011 bool include_color_conversion)
1012 {
1013 assert(s && c && indexes && palette);
1014
1015 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1016
1017 s->used_layers |= 1 << layer;
1018
1019 s->layers[layer].fs = include_color_conversion ?
1020 c->fs_palette.yuv : c->fs_palette.rgb;
1021
1022 s->layers[layer].samplers[0] = c->sampler_linear;
1023 s->layers[layer].samplers[1] = c->sampler_nearest;
1024 s->layers[layer].samplers[2] = NULL;
1025 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
1026 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
1027 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1028 calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
1029 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1030 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1031 }
1032
1033 void
1034 vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
1035 struct vl_compositor *c,
1036 unsigned layer,
1037 struct pipe_sampler_view *rgba,
1038 struct u_rect *src_rect,
1039 struct u_rect *dst_rect,
1040 struct vertex4f *colors)
1041 {
1042 unsigned i;
1043
1044 assert(s && c && rgba);
1045
1046 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1047
1048 s->used_layers |= 1 << layer;
1049 s->layers[layer].fs = c->fs_rgba;
1050 s->layers[layer].samplers[0] = c->sampler_linear;
1051 s->layers[layer].samplers[1] = NULL;
1052 s->layers[layer].samplers[2] = NULL;
1053 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
1054 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
1055 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1056 calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
1057 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1058 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1059
1060 if (colors)
1061 for (i = 0; i < 4; ++i)
1062 s->layers[layer].colors[i] = colors[i];
1063 }
1064
1065 void
1066 vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
1067 unsigned layer,
1068 enum vl_compositor_rotation rotate)
1069 {
1070 assert(s);
1071 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1072 s->layers[layer].rotate = rotate;
1073 }
1074
1075 void
1076 vl_compositor_render(struct vl_compositor_state *s,
1077 struct vl_compositor *c,
1078 struct pipe_surface *dst_surface,
1079 struct u_rect *dirty_area,
1080 bool clear_dirty)
1081 {
1082 assert(c);
1083 assert(dst_surface);
1084
1085 c->fb_state.width = dst_surface->width;
1086 c->fb_state.height = dst_surface->height;
1087 c->fb_state.cbufs[0] = dst_surface;
1088
1089 if (!s->scissor_valid) {
1090 s->scissor.minx = 0;
1091 s->scissor.miny = 0;
1092 s->scissor.maxx = dst_surface->width;
1093 s->scissor.maxy = dst_surface->height;
1094 }
1095 c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);
1096
1097 gen_vertex_data(c, s, dirty_area);
1098
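      /* If the remaining dirty area was not already covered by a clearing
       * layer in gen_vertex_data(), clear the whole render target once and
       * mark the dirty area as empty before drawing.
       */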
1099 if (clear_dirty && dirty_area &&
1100 (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {
1101
1102 c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
1103 0, 0, dst_surface->width, dst_surface->height);
1104 dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
1105 dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
1106 }
1107
1108 c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
1109 c->pipe->bind_vs_state(c->pipe, c->vs);
1110 c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
1111 c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
1112 pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
1113 c->pipe->bind_rasterizer_state(c->pipe, c->rast);
1114
1115 draw_layers(c, s, dirty_area);
1116 }
1117
1118 bool
1119 vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
1120 {
1121 assert(c);
1122
1123 memset(c, 0, sizeof(*c));
1124
1125 c->pipe = pipe;
1126
1127 c->upload = u_upload_create(pipe, 128 * 1024, PIPE_BIND_VERTEX_BUFFER,
1128 PIPE_USAGE_STREAM);
1129
1130 if (!c->upload)
1131 return false;
1132
1133 if (!init_pipe_state(c)) {
1134 u_upload_destroy(c->upload);
1135 return false;
1136 }
1137
1138 if (!init_shaders(c)) {
1139 u_upload_destroy(c->upload);
1140 cleanup_pipe_state(c);
1141 return false;
1142 }
1143
1144 if (!init_buffers(c)) {
1145 u_upload_destroy(c->upload);
1146 cleanup_shaders(c);
1147 cleanup_pipe_state(c);
1148 return false;
1149 }
1150
1151 return true;
1152 }
1153
1154 bool
1155 vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
1156 {
1157 vl_csc_matrix csc_matrix;
1158
1159 assert(s);
1160
1161 memset(s, 0, sizeof(*s));
1162
1163 s->pipe = pipe;
1164
1165 s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
1166 s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;
1167
1168 /*
1169 * Create our fragment shader's constant buffer
1170 * Const buffer contains the color conversion matrix and bias vectors
1171 */
1172 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
1173 s->csc_matrix = pipe_buffer_create
1174 (
1175 pipe->screen,
1176 PIPE_BIND_CONSTANT_BUFFER,
1177 PIPE_USAGE_DEFAULT,
1178 sizeof(csc_matrix) + 2*sizeof(float)
1179 );
1180
1181 vl_compositor_clear_layers(s);
1182
1183 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
1184 vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix, 1.0f, 0.0f);
1185
1186 return true;
1187 }
1188
1189 void
1190 vl_compositor_cleanup_state(struct vl_compositor_state *s)
1191 {
1192 assert(s);
1193
1194 vl_compositor_clear_layers(s);
1195 pipe_resource_reference(&s->csc_matrix, NULL);
1196 }