u_upload_mgr: pass alignment to u_upload_alloc manually
[mesa.git] src/gallium/auxiliary/vl/vl_compositor.c
/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include "pipe/p_compiler.h"
#include "pipe/p_context.h"

#include "util/u_memory.h"
#include "util/u_draw.h"
#include "util/u_surface.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_ureg.h"

#include "vl_csc.h"
#include "vl_types.h"
#include "vl_compositor.h"

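/* Sentinels for dirty-area tracking: (MIN_DIRTY, MAX_DIRTY) marks the whole
 * surface dirty, while the inverted pair (MAX_DIRTY, MIN_DIRTY) represents
 * an empty dirty rectangle. */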
#define MIN_DIRTY (0)
#define MAX_DIRTY (1 << 15)

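/* TGSI semantic indices are counted per semantic name, so VPOS (POSITION),
 * COLOR and the first GENERIC output can all use index 0. */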
enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_COLOR = 0,
   VS_O_VTEX = 0,
   VS_O_VTOP,
   VS_O_VBOTTOM,
};

static void *
create_vert_shader(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src vpos, vtex, color;
   struct ureg_dst tmp;
   struct ureg_dst o_vpos, o_vtex, o_color;
   struct ureg_dst o_vtop, o_vbottom;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vpos = ureg_DECL_vs_input(shader, 0);
   vtex = ureg_DECL_vs_input(shader, 1);
   color = ureg_DECL_vs_input(shader, 2);
   tmp = ureg_DECL_temporary(shader);
   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
   o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
   o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
   o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);

   /*
    * o_vpos = vpos
    * o_vtex = vtex
    * o_color = color
    */
   ureg_MOV(shader, o_vpos, vpos);
   ureg_MOV(shader, o_vtex, vtex);
   ureg_MOV(shader, o_color, color);

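   /* vtex.w carries the video height (zw.y, set up in calc_src_and_dst and
    * written by gen_rect_verts), so tmp.x and tmp.y below are half resp. a
    * quarter of it; the weave fragment shader uses these to address the two
    * fields of the luma and vertically subsampled chroma planes. */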
   /*
    * tmp.x = vtex.w / 2
    * tmp.y = vtex.w / 4
    *
    * o_vtop.x = vtex.x
    * o_vtop.y = vtex.y * tmp.x + 0.25f
    * o_vtop.z = vtex.y * tmp.y + 0.25f
    * o_vtop.w = 1 / tmp.x
    *
    * o_vbottom.x = vtex.x
    * o_vbottom.y = vtex.y * tmp.x - 0.25f
    * o_vbottom.z = vtex.y * tmp.y - 0.25f
    * o_vbottom.w = 1 / tmp.y
    */
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
            ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));

   ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
   ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
   ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
   ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));

   ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
   ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
   ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
   ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static void *
create_frag_shader_video_buffer(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc;
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel.xyz = tex(tc, sampler[i])
    * fragment = csc * texel
    */
   for (i = 0; i < 3; ++i)
      ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);

   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));

   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static void *
create_frag_shader_weave(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src i_tc[2];
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst t_tc[2];
   struct ureg_dst t_texel[2];
   struct ureg_dst o_fragment;
   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      t_texel[i] = ureg_DECL_temporary(shader);
   }
   o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /* calculate the texture offsets
    * t_tc.x = i_tc.x
    * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
    */
   for (i = 0; i < 2; ++i) {
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
      ureg_SUB(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               i_tc[i], ureg_imm1f(shader, 0.5f));
      ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
               ureg_imm1f(shader, i ? 1.0f : 0.0f));
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
   }

   /* fetch the texels
    * texel[0..1].x = tex(t_tc[0..1][0])
    * texel[0..1].y = tex(t_tc[0..1][1])
    * texel[0..1].z = tex(t_tc[0..1][2])
    */
   for (i = 0; i < 2; ++i)
      for (j = 0; j < 3; ++j) {
         struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
            TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

         ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
                  TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
      }

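   /* Weave deinterlacing: both field texels were fetched above; blend them
    * with a factor proportional to the distance from the current line to
    * reconstruct the lines missing from each field. */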
   /* calculate linear interpolation factor
    * factor = |round(i_tc.y) - i_tc.y| * 2
    */
   ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
   ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
   ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
   ureg_LRP(shader, t_texel[0], ureg_swizzle(ureg_src(t_tc[0]),
            TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
            ureg_src(t_texel[0]), ureg_src(t_texel[1]));

   /* and finally do colour space transformation
    * fragment = csc * texel
    */
   ureg_MOV(shader, ureg_writemask(t_texel[0], TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(t_texel[0]));

   ureg_MOV(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_texel[i]);
      ureg_release_temporary(shader, t_tc[i]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static void *
create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
{
   struct ureg_program *shader;
   struct ureg_src csc[3];
   struct ureg_src tc;
   struct ureg_src sampler;
   struct ureg_src palette;
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   for (i = 0; include_cc && i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   palette = ureg_DECL_sampler(shader, 1);

   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel = tex(tc, sampler)
    * fragment.xyz = tex(texel, palette) * csc
    * fragment.a = texel.a
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));

   if (include_cc) {
      ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
      for (i = 0; i < 3; ++i)
         ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
   } else {
      ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
               TGSI_TEXTURE_1D, ureg_src(texel), palette);
   }

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static void *
create_frag_shader_rgba(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc, color, sampler;
   struct ureg_dst texel, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * fragment = tex(tc, sampler) * color
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MUL(shader, fragment, ureg_src(texel), color);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static bool
init_shaders(struct vl_compositor *c)
{
   assert(c);

   c->vs = create_vert_shader(c);
   if (!c->vs) {
      debug_printf("Unable to create vertex shader.\n");
      return false;
   }

   c->fs_video_buffer = create_frag_shader_video_buffer(c);
   if (!c->fs_video_buffer) {
      debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_weave = create_frag_shader_weave(c);
   if (!c->fs_weave) {
      debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
      return false;
   }

   c->fs_palette.yuv = create_frag_shader_palette(c, true);
   if (!c->fs_palette.yuv) {
      debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_palette.rgb = create_frag_shader_palette(c, false);
   if (!c->fs_palette.rgb) {
      debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_rgba = create_frag_shader_rgba(c);
   if (!c->fs_rgba) {
      debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
      return false;
   }

   return true;
}

static void cleanup_shaders(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vs_state(c->pipe, c->vs);
   c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
   c->pipe->delete_fs_state(c->pipe, c->fs_weave);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
   c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
}

static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_depth_stencil_alpha_state dsa;
   unsigned i;

   assert(c);

   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);

   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);

   memset(&rast, 0, sizeof rast);
   rast.flatshade = 0;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.half_pixel_center = 1;
   rast.bottom_edge_rule = 1;
   rast.depth_clip = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
   c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);

   return true;
}

static void cleanup_pipe_state(struct vl_compositor *c)
{
   assert(c);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   c->pipe->bind_vs_state(c->pipe, NULL);
   c->pipe->bind_fs_state(c->pipe, NULL);

   c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
   c->pipe->delete_blend_state(c->pipe, c->blend_clear);
   c->pipe->delete_blend_state(c->pipe, c->blend_add);
   c->pipe->delete_rasterizer_state(c->pipe, c->rast);
}

static bool
init_buffers(struct vl_compositor *c)
{
   struct pipe_vertex_element vertex_elems[3];

   assert(c);

   /*
    * Create our vertex buffer and vertex buffer elements
    */
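   /* Interleaved layout per vertex: a vertex2f position followed by two
    * vertex4f attributes (texcoord.xy plus zw, and the per-vertex color). */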
   c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
   c->vertex_buf.buffer_offset = 0;
   c->vertex_buf.buffer = NULL;

   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
   vertex_elems[1].src_offset = sizeof(struct vertex2f);
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 0;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
   vertex_elems[2].instance_divisor = 0;
   vertex_elems[2].vertex_buffer_index = 0;
   vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
}

static inline struct u_rect
default_rect(struct vl_compositor_layer *layer)
{
   struct pipe_resource *res = layer->sampler_views[0]->texture;
   struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
   return rect;
}

static inline struct vertex2f
calc_topleft(struct vertex2f size, struct u_rect rect)
{
   struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
   return res;
}

static inline struct vertex2f
calc_bottomright(struct vertex2f size, struct u_rect rect)
{
   struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
   return res;
}

static inline void
calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
                 struct u_rect src, struct u_rect dst)
{
   struct vertex2f size = { width, height };

   layer->src.tl = calc_topleft(size, src);
   layer->src.br = calc_bottomright(size, src);
   layer->dst.tl = calc_topleft(size, dst);
   layer->dst.br = calc_bottomright(size, dst);
   layer->zw.x = 0.0f;
   layer->zw.y = size.y;
}

static void
gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, tr, br, bl;

   assert(vb && layer);

   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      tr.x = layer->dst.br.x;
      tr.y = layer->dst.tl.y;
      br = layer->dst.br;
      bl.x = layer->dst.tl.x;
      bl.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      tr = layer->dst.br;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      bl = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      tr.x = layer->dst.tl.x;
      tr.y = layer->dst.br.y;
      br = layer->dst.tl;
      bl.x = layer->dst.br.x;
      bl.y = layer->dst.tl.y;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      tr = layer->dst.tl;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      bl = layer->dst.br;
      break;
   }

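   /* Each of the four quad vertices occupies five vertex2f slots: position,
    * src texcoord, zw (field flag and height), and the vertex4f color split
    * over two slots; this matches the stride set up in init_buffers(). */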
   vb[ 0].x = tl.x;
   vb[ 0].y = tl.y;
   vb[ 1].x = layer->src.tl.x;
   vb[ 1].y = layer->src.tl.y;
   vb[ 2] = layer->zw;
   vb[ 3].x = layer->colors[0].x;
   vb[ 3].y = layer->colors[0].y;
   vb[ 4].x = layer->colors[0].z;
   vb[ 4].y = layer->colors[0].w;

   vb[ 5].x = tr.x;
   vb[ 5].y = tr.y;
   vb[ 6].x = layer->src.br.x;
   vb[ 6].y = layer->src.tl.y;
   vb[ 7] = layer->zw;
   vb[ 8].x = layer->colors[1].x;
   vb[ 8].y = layer->colors[1].y;
   vb[ 9].x = layer->colors[1].z;
   vb[ 9].y = layer->colors[1].w;

   vb[10].x = br.x;
   vb[10].y = br.y;
   vb[11].x = layer->src.br.x;
   vb[11].y = layer->src.br.y;
   vb[12] = layer->zw;
   vb[13].x = layer->colors[2].x;
   vb[13].y = layer->colors[2].y;
   vb[14].x = layer->colors[2].z;
   vb[14].y = layer->colors[2].w;

   vb[15].x = bl.x;
   vb[15].y = bl.y;
   vb[16].x = layer->src.tl.x;
   vb[16].y = layer->src.br.y;
   vb[17] = layer->zw;
   vb[18].x = layer->colors[3].x;
   vb[18].y = layer->colors[3].y;
   vb[19].x = layer->colors[3].z;
   vb[19].y = layer->colors[3].w;
}

static inline struct u_rect
calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, br;
   struct u_rect result;

   assert(s && layer);

   // rotate
   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      br = layer->dst.br;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      br = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      break;
   }

   // scale
   result.x0 = tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y0 = tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
   result.x1 = br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y1 = br.y * layer->viewport.scale[1] + layer->viewport.translate[1];

   // and clip
   result.x0 = MAX2(result.x0, s->scissor.minx);
   result.y0 = MAX2(result.y0, s->scissor.miny);
   result.x1 = MIN2(result.x1, s->scissor.maxx);
   result.y1 = MIN2(result.y1, s->scissor.maxy);
   return result;
}

static void
gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   struct vertex2f *vb;
   unsigned i;

   assert(c);

   /* Allocate new memory for vertices. */
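   /* The required alignment is passed explicitly for each allocation here
    * rather than relying on a setting fixed at upload-manager creation. */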
   u_upload_alloc(c->upload, 0,
                  c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
                  4, /* alignment */
                  &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
                  (void**)&vb);

   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         gen_rect_verts(vb, layer);
         vb += 20;

         if (!layer->viewport_valid) {
            layer->viewport.scale[0] = c->fb_state.width;
            layer->viewport.scale[1] = c->fb_state.height;
            layer->viewport.translate[0] = 0;
            layer->viewport.translate[1] = 0;
         }

         if (dirty && layer->clearing) {
            struct u_rect drawn = calc_drawn_area(s, layer);
            if (dirty->x0 >= drawn.x0 &&
                dirty->y0 >= drawn.y0 &&
                dirty->x1 <= drawn.x1 &&
                dirty->y1 <= drawn.y1) {

               // We clear the dirty area anyway, no need for clear_render_target
               dirty->x0 = dirty->y0 = MAX_DIRTY;
               dirty->x1 = dirty->y1 = MIN_DIRTY;
            }
         }
      }
   }

   u_upload_unmap(c->upload);
}

static void
draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
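         /* A layer uses between one and three planes; a NULL view marks the
          * first unused slot. */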
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
         void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;

         c->pipe->bind_blend_state(c->pipe, blend);
         c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_sampler_states(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                      num_sampler_views, layer->samplers);
         c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                    num_sampler_views, samplers);

         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(s, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}

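/* Mark the whole area dirty; an inverted rectangle (x0 > x1, y0 > y1) would
 * mean nothing is dirty. */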
void
vl_compositor_reset_dirty_area(struct u_rect *dirty)
{
   assert(dirty);

   dirty->x0 = dirty->y0 = MIN_DIRTY;
   dirty->x1 = dirty->y1 = MAX_DIRTY;
}

void
vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   s->clear_color = *color;
}

void
vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   *color = s->clear_color;
}

void
vl_compositor_clear_layers(struct vl_compositor_state *s)
{
   unsigned i, j;

   assert(s);

   s->used_layers = 0;
   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
      s->layers[i].clearing = i ? false : true;
      s->layers[i].blend = NULL;
      s->layers[i].fs = NULL;
      s->layers[i].viewport.scale[2] = 1;
      s->layers[i].viewport.translate[2] = 0;
      s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;

      for (j = 0; j < 3; j++)
         pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
      for (j = 0; j < 4; ++j)
         s->layers[i].colors[j] = v_one;
   }
}

void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   u_upload_destroy(c->upload);
   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}

void
vl_compositor_set_csc_matrix(struct vl_compositor_state *s, vl_csc_matrix const *matrix)
{
   struct pipe_transfer *buf_transfer;

   assert(s);

   memcpy(pipe_buffer_map(s->pipe, s->csc_matrix,
                          PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                          &buf_transfer),
          matrix,
          sizeof(vl_csc_matrix));

   pipe_buffer_unmap(s->pipe, buf_transfer);
}

void
vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
{
   assert(s);

   s->scissor_valid = dst_clip != NULL;
   if (dst_clip) {
      s->scissor.minx = dst_clip->x0;
      s->scissor.miny = dst_clip->y0;
      s->scissor.maxx = dst_clip->x1;
      s->scissor.maxy = dst_clip->y1;
   }
}

void
vl_compositor_set_layer_blend(struct vl_compositor_state *s,
                              unsigned layer, void *blend,
                              bool is_clearing)
{
   assert(s && blend);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->layers[layer].clearing = is_clearing;
   s->layers[layer].blend = blend;
}

void
vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
                                 unsigned layer, struct u_rect *dst_area)
{
   assert(s);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->layers[layer].viewport_valid = dst_area != NULL;
   if (dst_area) {
      s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
      s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
      s->layers[layer].viewport.translate[0] = dst_area->x0;
      s->layers[layer].viewport.translate[1] = dst_area->y0;
   }
}

void
vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
                               struct vl_compositor *c,
                               unsigned layer,
                               struct pipe_video_buffer *buffer,
                               struct u_rect *src_rect,
                               struct u_rect *dst_rect,
                               enum vl_compositor_deinterlace deinterlace)
{
   struct pipe_sampler_view **sampler_views;
   unsigned i;

   assert(s && c && buffer);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;
   sampler_views = buffer->get_sampler_view_components(buffer);
   for (i = 0; i < 3; ++i) {
      s->layers[layer].samplers[i] = c->sampler_linear;
      pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
   }

   calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   if (buffer->interlaced) {
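      /* zw.y holds the frame height (set in calc_src_and_dst), so this is
       * half a line in normalized texture coordinates; bob deinterlacing
       * shifts the source rect by it to sample the wanted field. */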
      float half_a_line = 0.5f / s->layers[layer].zw.y;
      switch (deinterlace) {
      case VL_COMPOSITOR_WEAVE:
         s->layers[layer].fs = c->fs_weave;
         break;

      case VL_COMPOSITOR_BOB_TOP:
         s->layers[layer].zw.x = 0.0f;
         s->layers[layer].src.tl.y += half_a_line;
         s->layers[layer].src.br.y += half_a_line;
         s->layers[layer].fs = c->fs_video_buffer;
         break;

      case VL_COMPOSITOR_BOB_BOTTOM:
         s->layers[layer].zw.x = 1.0f;
         s->layers[layer].src.tl.y -= half_a_line;
         s->layers[layer].src.br.y -= half_a_line;
         s->layers[layer].fs = c->fs_video_buffer;
         break;
      }

   } else
      s->layers[layer].fs = c->fs_video_buffer;
}

void
vl_compositor_set_palette_layer(struct vl_compositor_state *s,
                                struct vl_compositor *c,
                                unsigned layer,
                                struct pipe_sampler_view *indexes,
                                struct pipe_sampler_view *palette,
                                struct u_rect *src_rect,
                                struct u_rect *dst_rect,
                                bool include_color_conversion)
{
   assert(s && c && indexes && palette);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;

   s->layers[layer].fs = include_color_conversion ?
      c->fs_palette.yuv : c->fs_palette.rgb;

   s->layers[layer].samplers[0] = c->sampler_linear;
   s->layers[layer].samplers[1] = c->sampler_nearest;
   s->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
}

void
vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
                             struct vl_compositor *c,
                             unsigned layer,
                             struct pipe_sampler_view *rgba,
                             struct u_rect *src_rect,
                             struct u_rect *dst_rect,
                             struct vertex4f *colors)
{
   unsigned i;

   assert(s && c && rgba);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;
   s->layers[layer].fs = c->fs_rgba;
   s->layers[layer].samplers[0] = c->sampler_linear;
   s->layers[layer].samplers[1] = NULL;
   s->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   if (colors)
      for (i = 0; i < 4; ++i)
         s->layers[layer].colors[i] = colors[i];
}

void
vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
                                 unsigned layer,
                                 enum vl_compositor_rotation rotate)
{
   assert(s);
   assert(layer < VL_COMPOSITOR_MAX_LAYERS);
   s->layers[layer].rotate = rotate;
}

void
vl_compositor_render(struct vl_compositor_state *s,
                     struct vl_compositor *c,
                     struct pipe_surface *dst_surface,
                     struct u_rect *dirty_area,
                     bool clear_dirty)
{
   assert(c);
   assert(dst_surface);

   c->fb_state.width = dst_surface->width;
   c->fb_state.height = dst_surface->height;
   c->fb_state.cbufs[0] = dst_surface;

   if (!s->scissor_valid) {
      s->scissor.minx = 0;
      s->scissor.miny = 0;
      s->scissor.maxx = dst_surface->width;
      s->scissor.maxy = dst_surface->height;
   }
   c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);

   gen_vertex_data(c, s, dirty_area);

   if (clear_dirty && dirty_area &&
       (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {

      c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
                                   0, 0, dst_surface->width, dst_surface->height);
      dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
      dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
   }

   c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
   c->pipe->bind_vs_state(c->pipe, c->vs);
   c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
   c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
   c->pipe->bind_rasterizer_state(c->pipe, c->rast);

   draw_layers(c, s, dirty_area);
}

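/* Rough usage sketch (not from this file): create a compositor and a state
 * object once, then per frame set up layers and render:
 *
 *    vl_compositor_init(&c, pipe);
 *    vl_compositor_init_state(&s, pipe);
 *    vl_compositor_clear_layers(&s);
 *    vl_compositor_set_buffer_layer(&s, &c, 0, buf, NULL, NULL,
 *                                   VL_COMPOSITOR_WEAVE);
 *    vl_compositor_render(&s, &c, dst_surface, &dirty_area, false);
 */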
bool
vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
{
   assert(c);

   memset(c, 0, sizeof(*c));

   c->pipe = pipe;

   c->upload = u_upload_create(pipe, 128 * 1024, 4, PIPE_BIND_VERTEX_BUFFER);

   if (!c->upload)
      return false;

   if (!init_pipe_state(c)) {
      u_upload_destroy(c->upload);
      return false;
   }

   if (!init_shaders(c)) {
      u_upload_destroy(c->upload);
      cleanup_pipe_state(c);
      return false;
   }

   if (!init_buffers(c)) {
      u_upload_destroy(c->upload);
      cleanup_shaders(c);
      cleanup_pipe_state(c);
      return false;
   }

   return true;
}

bool
vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
{
   vl_csc_matrix csc_matrix;

   assert(s);

   memset(s, 0, sizeof(*s));

   s->pipe = pipe;

   s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
   s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;

   /*
    * Create our fragment shader's constant buffer
    * Const buffer contains the color conversion matrix and bias vectors
    */
   /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
   s->csc_matrix = pipe_buffer_create(pipe->screen, PIPE_BIND_CONSTANT_BUFFER,
                                      PIPE_USAGE_DEFAULT, sizeof(csc_matrix));

   vl_compositor_clear_layers(s);

   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
   vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix);

   return true;
}

void
vl_compositor_cleanup_state(struct vl_compositor_state *s)
{
   assert(s);

   vl_compositor_clear_layers(s);
   pipe_resource_reference(&s->csc_matrix, NULL);
}