vl: Remove unused declaration
[mesa.git] / src / gallium / auxiliary / vl / vl_compositor.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include "pipe/p_compiler.h"
31 #include "pipe/p_context.h"
32
33 #include "util/u_memory.h"
34 #include "util/u_draw.h"
35 #include "util/u_surface.h"
36
37 #include "tgsi/tgsi_ureg.h"
38
39 #include "vl_csc.h"
40 #include "vl_types.h"
41 #include "vl_compositor.h"
42
43 #define MIN_DIRTY (0)
44 #define MAX_DIRTY (1 << 15)
45
46 typedef float csc_matrix[16];
47
48 static void *
49 create_vert_shader(struct vl_compositor *c)
50 {
51 struct ureg_program *shader;
52 struct ureg_src vpos, vtex;
53 struct ureg_dst o_vpos, o_vtex;
54
55 shader = ureg_create(TGSI_PROCESSOR_VERTEX);
56 if (!shader)
57 return false;
58
59 vpos = ureg_DECL_vs_input(shader, 0);
60 vtex = ureg_DECL_vs_input(shader, 1);
61 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
62 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
63
64 /*
65 * o_vpos = vpos
66 * o_vtex = vtex
67 */
68 ureg_MOV(shader, o_vpos, vpos);
69 ureg_MOV(shader, o_vtex, vtex);
70
71 ureg_END(shader);
72
73 return ureg_create_shader_and_destroy(shader, c->pipe);
74 }
75
76 static void *
77 create_frag_shader_video_buffer(struct vl_compositor *c)
78 {
79 struct ureg_program *shader;
80 struct ureg_src tc;
81 struct ureg_src csc[3];
82 struct ureg_src sampler[3];
83 struct ureg_dst texel;
84 struct ureg_dst fragment;
85 unsigned i;
86
87 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
88 if (!shader)
89 return false;
90
91 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
92 for (i = 0; i < 3; ++i) {
93 csc[i] = ureg_DECL_constant(shader, i);
94 sampler[i] = ureg_DECL_sampler(shader, i);
95 }
96 texel = ureg_DECL_temporary(shader);
97 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
98
99 /*
100 * texel.xyz = tex(tc, sampler[i])
101 * fragment = csc * texel
102 */
103 for (i = 0; i < 3; ++i)
104 ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, tc, sampler[i]);
105
106 ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
107
108 for (i = 0; i < 3; ++i)
109 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
110
111 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
112
113 ureg_release_temporary(shader, texel);
114 ureg_END(shader);
115
116 return ureg_create_shader_and_destroy(shader, c->pipe);
117 }
118
119 static void *
120 create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
121 {
122 struct ureg_program *shader;
123 struct ureg_src csc[3];
124 struct ureg_src tc;
125 struct ureg_src sampler;
126 struct ureg_src palette;
127 struct ureg_dst texel;
128 struct ureg_dst fragment;
129 unsigned i;
130
131 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
132 if (!shader)
133 return false;
134
135 for (i = 0; include_cc && i < 3; ++i)
136 csc[i] = ureg_DECL_constant(shader, i);
137
138 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
139 sampler = ureg_DECL_sampler(shader, 0);
140 palette = ureg_DECL_sampler(shader, 1);
141
142 texel = ureg_DECL_temporary(shader);
143 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
144
145 /*
146 * texel = tex(tc, sampler)
147 * fragment.xyz = tex(texel, palette) * csc
148 * fragment.a = texel.a
149 */
150 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
151 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));
152
153 if (include_cc) {
154 ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
155 for (i = 0; i < 3; ++i)
156 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
157 } else {
158 ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
159 TGSI_TEXTURE_1D, ureg_src(texel), palette);
160 }
161
162 ureg_release_temporary(shader, texel);
163 ureg_END(shader);
164
165 return ureg_create_shader_and_destroy(shader, c->pipe);
166 }
167
168 static void *
169 create_frag_shader_rgba(struct vl_compositor *c)
170 {
171 struct ureg_program *shader;
172 struct ureg_src tc;
173 struct ureg_src sampler;
174 struct ureg_dst fragment;
175
176 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
177 if (!shader)
178 return false;
179
180 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
181 sampler = ureg_DECL_sampler(shader, 0);
182 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
183
184 /*
185 * fragment = tex(tc, sampler)
186 */
187 ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
188 ureg_END(shader);
189
190 return ureg_create_shader_and_destroy(shader, c->pipe);
191 }
192
/**
 * Build every shader the compositor needs: the shared vertex shader and
 * the four fragment variants (video buffer YCbCr->RGB, YUV palette,
 * RGB palette, plain RGBA).
 *
 * Returns false as soon as any creation fails.
 * NOTE(review): on partial failure the shaders created so far are not
 * deleted here, and the caller (vl_compositor_init) does not call
 * cleanup_shaders on this path — confirm whether that leak matters.
 */
static bool
init_shaders(struct vl_compositor *c)
{
   assert(c);

   c->vs = create_vert_shader(c);
   if (!c->vs) {
      debug_printf("Unable to create vertex shader.\n");
      return false;
   }

   c->fs_video_buffer = create_frag_shader_video_buffer(c);
   if (!c->fs_video_buffer) {
      debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_palette.yuv = create_frag_shader_palette(c, true);
   if (!c->fs_palette.yuv) {
      debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_palette.rgb = create_frag_shader_palette(c, false);
   if (!c->fs_palette.rgb) {
      debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_rgba = create_frag_shader_rgba(c);
   if (!c->fs_rgba) {
      debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
      return false;
   }

   return true;
}
230
/* Delete all shader CSOs created by init_shaders (vertex shader and the
 * four fragment variants). Assumes init_shaders fully succeeded. */
static void cleanup_shaders(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vs_state(c->pipe, c->vs);
   c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
   c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
}
241
/**
 * Create the fixed pipe CSO state shared by every compositor draw:
 * linear and nearest samplers, a "clear" (no-blend) and an "add"
 * (src-alpha blend) blend state, a scissored rasterizer, and an all-pass
 * depth/stencil/alpha state which is bound immediately.
 *
 * Always returns true.
 * NOTE(review): the create_*_state results are not checked for NULL here —
 * confirm drivers cannot fail these, or add checks.
 */
static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_depth_stencil_alpha_state dsa;
   unsigned i;

   assert(c);

   /* Single color buffer, no depth; width/height filled per render call */
   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   /* z/w of the viewport never change; x/y are set in vl_compositor_render */
   c->viewport.scale[2] = 1;
   c->viewport.scale[3] = 1;
   c->viewport.translate[2] = 0;
   c->viewport.translate[3] = 0;

   /* Clamped, non-mipmapped sampling; linear variant first */
   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* Same sampler with nearest filtering (used for palette lookups) */
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* "Clear" blend: straight overwrite of all RGBA channels */
   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);

   /* "Add" blend: classic src-alpha / inv-src-alpha compositing */
   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);

   /* Rasterizer: no culling, filled polys, scissor enabled for dst_clip */
   memset(&rast, 0, sizeof rast);
   rast.flatshade = 1;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.gl_rasterization_rules = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   /* Depth/stencil/alpha: everything disabled / always-pass */
   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
   /* DSA never changes, so it can be bound once right here */
   c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);

   return true;
}
332
/* Delete all CSOs created by init_pipe_state. Shader bindings are cleared
 * first because some drivers object to deleting bound state. */
static void cleanup_pipe_state(struct vl_compositor *c)
{
   assert(c);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   c->pipe->bind_vs_state(c->pipe, NULL);
   c->pipe->bind_fs_state(c->pipe, NULL);

   c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
   c->pipe->delete_blend_state(c->pipe, c->blend_clear);
   c->pipe->delete_blend_state(c->pipe, c->blend_add);
   c->pipe->delete_rasterizer_state(c->pipe, c->rast);
}
348
349 static bool
350 create_vertex_buffer(struct vl_compositor *c)
351 {
352 assert(c);
353
354 pipe_resource_reference(&c->vertex_buf.buffer, NULL);
355 c->vertex_buf.buffer = pipe_buffer_create
356 (
357 c->pipe->screen,
358 PIPE_BIND_VERTEX_BUFFER,
359 PIPE_USAGE_STREAM,
360 sizeof(struct vertex4f) * VL_COMPOSITOR_MAX_LAYERS * 4
361 );
362
363 return c->vertex_buf.buffer != NULL;
364 }
365
366 static bool
367 init_buffers(struct vl_compositor *c)
368 {
369 struct pipe_vertex_element vertex_elems[2];
370
371 assert(c);
372
373 /*
374 * Create our vertex buffer and vertex buffer elements
375 */
376 c->vertex_buf.stride = sizeof(struct vertex4f);
377 c->vertex_buf.buffer_offset = 0;
378 create_vertex_buffer(c);
379
380 vertex_elems[0].src_offset = 0;
381 vertex_elems[0].instance_divisor = 0;
382 vertex_elems[0].vertex_buffer_index = 0;
383 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
384 vertex_elems[1].src_offset = sizeof(struct vertex2f);
385 vertex_elems[1].instance_divisor = 0;
386 vertex_elems[1].vertex_buffer_index = 0;
387 vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
388 c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
389
390 /*
391 * Create our fragment shader's constant buffer
392 * Const buffer contains the color conversion matrix and bias vectors
393 */
394 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
395 c->csc_matrix = pipe_buffer_create
396 (
397 c->pipe->screen,
398 PIPE_BIND_CONSTANT_BUFFER,
399 PIPE_USAGE_STATIC,
400 sizeof(csc_matrix)
401 );
402
403 return true;
404 }
405
/* Release everything init_buffers created: the vertex-elements CSO and
 * the references on the vertex and CSC constant buffers. */
static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
   pipe_resource_reference(&c->csc_matrix, NULL);
}
415
416 static INLINE struct pipe_video_rect
417 default_rect(struct vl_compositor_layer *layer)
418 {
419 struct pipe_resource *res = layer->sampler_views[0]->texture;
420 struct pipe_video_rect rect = { 0, 0, res->width0, res->height0 };
421 return rect;
422 }
423
424 static INLINE struct vertex2f
425 calc_topleft(struct vertex2f size, struct pipe_video_rect rect)
426 {
427 struct vertex2f res = { rect.x / size.x, rect.y / size.y };
428 return res;
429 }
430
431 static INLINE struct vertex2f
432 calc_bottomright(struct vertex2f size, struct pipe_video_rect rect)
433 {
434 struct vertex2f res = { (rect.x + rect.w) / size.x, (rect.y + rect.h) / size.y };
435 return res;
436 }
437
438 static INLINE void
439 calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
440 struct pipe_video_rect src, struct pipe_video_rect dst)
441 {
442 struct vertex2f size = { width, height };
443
444 layer->src.tl = calc_topleft(size, src);
445 layer->src.br = calc_bottomright(size, src);
446 layer->dst.tl = calc_topleft(size, dst);
447 layer->dst.br = calc_bottomright(size, dst);
448 }
449
450 static void
451 gen_rect_verts(struct vertex4f *vb, struct vl_compositor_layer *layer)
452 {
453 assert(vb && layer);
454
455 vb[0].x = layer->dst.tl.x;
456 vb[0].y = layer->dst.tl.y;
457 vb[0].z = layer->src.tl.x;
458 vb[0].w = layer->src.tl.y;
459
460 vb[1].x = layer->dst.br.x;
461 vb[1].y = layer->dst.tl.y;
462 vb[1].z = layer->src.br.x;
463 vb[1].w = layer->src.tl.y;
464
465 vb[2].x = layer->dst.br.x;
466 vb[2].y = layer->dst.br.y;
467 vb[2].z = layer->src.br.x;
468 vb[2].w = layer->src.br.y;
469
470 vb[3].x = layer->dst.tl.x;
471 vb[3].y = layer->dst.br.y;
472 vb[3].z = layer->src.tl.x;
473 vb[3].w = layer->src.br.y;
474 }
475
476 static INLINE struct u_rect
477 calc_drawn_area(struct vl_compositor *c, struct vl_compositor_layer *layer)
478 {
479 struct u_rect result;
480
481 // scale
482 result.x0 = layer->dst.tl.x * c->viewport.scale[0] + c->viewport.translate[0];
483 result.y0 = layer->dst.tl.y * c->viewport.scale[1] + c->viewport.translate[1];
484 result.x1 = layer->dst.br.x * c->viewport.scale[0] + c->viewport.translate[0];
485 result.y1 = layer->dst.br.y * c->viewport.scale[1] + c->viewport.translate[1];
486
487 // and clip
488 result.x0 = MAX2(result.x0, c->scissor.minx);
489 result.y0 = MAX2(result.y0, c->scissor.miny);
490 result.x1 = MIN2(result.x1, c->scissor.maxx);
491 result.y1 = MIN2(result.y1, c->scissor.maxy);
492 return result;
493 }
494
495 static void
496 gen_vertex_data(struct vl_compositor *c, struct u_rect *dirty)
497 {
498 struct vertex4f *vb;
499 struct pipe_transfer *buf_transfer;
500 unsigned i;
501
502 assert(c);
503
504 vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
505 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | PIPE_TRANSFER_DONTBLOCK,
506 &buf_transfer);
507
508 if (!vb) {
509 // If buffer is still locked from last draw create a new one
510 create_vertex_buffer(c);
511 vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
512 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
513 &buf_transfer);
514 }
515
516 for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
517 if (c->used_layers & (1 << i)) {
518 struct vl_compositor_layer *layer = &c->layers[i];
519 gen_rect_verts(vb, layer);
520 vb += 4;
521
522 if (dirty && layer->clearing) {
523 struct u_rect drawn = calc_drawn_area(c, layer);
524 if (
525 dirty->x0 >= drawn.x0 &&
526 dirty->y0 >= drawn.y0 &&
527 dirty->x1 <= drawn.x1 &&
528 dirty->y1 <= drawn.y1) {
529
530 // We clear the dirty area anyway, no need for clear_render_target
531 dirty->x0 = dirty->y0 = MAX_DIRTY;
532 dirty->x1 = dirty->y1 = MIN_DIRTY;
533 }
534 }
535 }
536 }
537
538 pipe_buffer_unmap(c->pipe, buf_transfer);
539 }
540
/**
 * Draw every active layer as one quad, binding its blend state, fragment
 * shader, samplers and sampler views first. vb_index tracks the quad's
 * position in the vertex buffer (4 verts per active layer, matching the
 * order gen_vertex_data wrote them).
 *
 * If 'dirty' is non-NULL the union of all drawn areas is accumulated
 * into it for the caller's next-frame bookkeeping.
 */
static void
draw_layers(struct vl_compositor *c, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (c->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &c->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         /* views are packed front-to-back; count until the first NULL (max 3) */
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;

         c->pipe->bind_blend_state(c->pipe, layer->blend);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_fragment_sampler_states(c->pipe, num_sampler_views, layer->samplers);
         c->pipe->set_fragment_sampler_views(c->pipe, num_sampler_views, samplers);
         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(c, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}
572
573 void
574 vl_compositor_reset_dirty_area(struct u_rect *dirty)
575 {
576 assert(dirty);
577
578 dirty->x0 = dirty->y0 = MIN_DIRTY;
579 dirty->x1 = dirty->y1 = MAX_DIRTY;
580 }
581
582 void
583 vl_compositor_set_clear_color(struct vl_compositor *c, union pipe_color_union *color)
584 {
585 assert(c);
586
587 c->clear_color = *color;
588 }
589
590 void
591 vl_compositor_get_clear_color(struct vl_compositor *c, union pipe_color_union *color)
592 {
593 assert(c);
594 assert(color);
595
596 *color = c->clear_color;
597 }
598
599 void
600 vl_compositor_clear_layers(struct vl_compositor *c)
601 {
602 unsigned i, j;
603
604 assert(c);
605
606 c->used_layers = 0;
607 for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
608 c->layers[i].clearing = i ? false : true;
609 c->layers[i].blend = i ? c->blend_add : c->blend_clear;
610 c->layers[i].fs = NULL;
611 for ( j = 0; j < 3; j++)
612 pipe_sampler_view_reference(&c->layers[i].sampler_views[j], NULL);
613 }
614 }
615
/* Tear down the compositor: drop all layer references first, then the
 * buffers, shaders and pipe state created by vl_compositor_init. */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   vl_compositor_clear_layers(c);
   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
627
628 void
629 vl_compositor_set_csc_matrix(struct vl_compositor *c, const float matrix[16])
630 {
631 struct pipe_transfer *buf_transfer;
632
633 assert(c);
634
635 memcpy
636 (
637 pipe_buffer_map(c->pipe, c->csc_matrix,
638 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
639 &buf_transfer),
640 matrix,
641 sizeof(csc_matrix)
642 );
643
644 pipe_buffer_unmap(c->pipe, buf_transfer);
645 }
646
647 void
648 vl_compositor_set_layer_blend(struct vl_compositor *c,
649 unsigned layer, void *blend,
650 bool is_clearing)
651 {
652 assert(c && blend);
653
654 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
655
656 c->layers[layer].clearing = is_clearing;
657 c->layers[layer].blend = blend;
658 }
659
660 void
661 vl_compositor_set_buffer_layer(struct vl_compositor *c,
662 unsigned layer,
663 struct pipe_video_buffer *buffer,
664 struct pipe_video_rect *src_rect,
665 struct pipe_video_rect *dst_rect)
666 {
667 struct pipe_sampler_view **sampler_views;
668 unsigned i;
669
670 assert(c && buffer);
671
672 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
673
674 c->used_layers |= 1 << layer;
675 c->layers[layer].fs = c->fs_video_buffer;
676
677 sampler_views = buffer->get_sampler_view_components(buffer);
678 for (i = 0; i < 3; ++i) {
679 c->layers[layer].samplers[i] = c->sampler_linear;
680 pipe_sampler_view_reference(&c->layers[layer].sampler_views[i], sampler_views[i]);
681 }
682
683 calc_src_and_dst(&c->layers[layer], buffer->width, buffer->height,
684 src_rect ? *src_rect : default_rect(&c->layers[layer]),
685 dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
686 }
687
688 void
689 vl_compositor_set_palette_layer(struct vl_compositor *c,
690 unsigned layer,
691 struct pipe_sampler_view *indexes,
692 struct pipe_sampler_view *palette,
693 struct pipe_video_rect *src_rect,
694 struct pipe_video_rect *dst_rect,
695 bool include_color_conversion)
696 {
697 assert(c && indexes && palette);
698
699 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
700
701 c->used_layers |= 1 << layer;
702
703 c->layers[layer].fs = include_color_conversion ?
704 c->fs_palette.yuv : c->fs_palette.rgb;
705
706 c->layers[layer].samplers[0] = c->sampler_linear;
707 c->layers[layer].samplers[1] = c->sampler_nearest;
708 c->layers[layer].samplers[2] = NULL;
709 pipe_sampler_view_reference(&c->layers[layer].sampler_views[0], indexes);
710 pipe_sampler_view_reference(&c->layers[layer].sampler_views[1], palette);
711 pipe_sampler_view_reference(&c->layers[layer].sampler_views[2], NULL);
712 calc_src_and_dst(&c->layers[layer], indexes->texture->width0, indexes->texture->height0,
713 src_rect ? *src_rect : default_rect(&c->layers[layer]),
714 dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
715 }
716
717 void
718 vl_compositor_set_rgba_layer(struct vl_compositor *c,
719 unsigned layer,
720 struct pipe_sampler_view *rgba,
721 struct pipe_video_rect *src_rect,
722 struct pipe_video_rect *dst_rect)
723 {
724 assert(c && rgba);
725
726 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
727
728 c->used_layers |= 1 << layer;
729 c->layers[layer].fs = c->fs_rgba;
730 c->layers[layer].samplers[0] = c->sampler_linear;
731 c->layers[layer].samplers[1] = NULL;
732 c->layers[layer].samplers[2] = NULL;
733 pipe_sampler_view_reference(&c->layers[layer].sampler_views[0], rgba);
734 pipe_sampler_view_reference(&c->layers[layer].sampler_views[1], NULL);
735 pipe_sampler_view_reference(&c->layers[layer].sampler_views[2], NULL);
736 calc_src_and_dst(&c->layers[layer], rgba->texture->width0, rgba->texture->height0,
737 src_rect ? *src_rect : default_rect(&c->layers[layer]),
738 dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
739 }
740
741 void
742 vl_compositor_render(struct vl_compositor *c,
743 struct pipe_surface *dst_surface,
744 struct pipe_video_rect *dst_area,
745 struct pipe_video_rect *dst_clip,
746 struct u_rect *dirty_area)
747 {
748 assert(c);
749 assert(dst_surface);
750
751 c->fb_state.width = dst_surface->width;
752 c->fb_state.height = dst_surface->height;
753 c->fb_state.cbufs[0] = dst_surface;
754
755 if (dst_area) {
756 c->viewport.scale[0] = dst_area->w;
757 c->viewport.scale[1] = dst_area->h;
758 c->viewport.translate[0] = dst_area->x;
759 c->viewport.translate[1] = dst_area->y;
760 } else {
761 c->viewport.scale[0] = dst_surface->width;
762 c->viewport.scale[1] = dst_surface->height;
763 c->viewport.translate[0] = 0;
764 c->viewport.translate[1] = 0;
765 }
766
767 if (dst_clip) {
768 c->scissor.minx = dst_clip->x;
769 c->scissor.miny = dst_clip->y;
770 c->scissor.maxx = dst_clip->x + dst_clip->w;
771 c->scissor.maxy = dst_clip->y + dst_clip->h;
772 } else {
773 c->scissor.minx = 0;
774 c->scissor.miny = 0;
775 c->scissor.maxx = dst_surface->width;
776 c->scissor.maxy = dst_surface->height;
777 }
778
779 gen_vertex_data(c, dirty_area);
780
781 if (dirty_area && (dirty_area->x0 < dirty_area->x1 ||
782 dirty_area->y0 < dirty_area->y1)) {
783
784 c->pipe->clear_render_target(c->pipe, dst_surface, &c->clear_color,
785 0, 0, dst_surface->width, dst_surface->height);
786 dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
787 dirty_area->x0 = dirty_area->y1 = MIN_DIRTY;
788 }
789
790 c->pipe->set_scissor_state(c->pipe, &c->scissor);
791 c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
792 c->pipe->set_viewport_state(c->pipe, &c->viewport);
793 c->pipe->bind_vs_state(c->pipe, c->vs);
794 c->pipe->set_vertex_buffers(c->pipe, 1, &c->vertex_buf);
795 c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
796 c->pipe->set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, c->csc_matrix);
797 c->pipe->bind_rasterizer_state(c->pipe, c->rast);
798
799 draw_layers(c, dirty_area);
800 }
801
802 bool
803 vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
804 {
805 csc_matrix csc_matrix;
806
807 c->pipe = pipe;
808
809 if (!init_pipe_state(c))
810 return false;
811
812 if (!init_shaders(c)) {
813 cleanup_pipe_state(c);
814 return false;
815 }
816
817 if (!init_buffers(c)) {
818 cleanup_shaders(c);
819 cleanup_pipe_state(c);
820 return false;
821 }
822
823 vl_compositor_clear_layers(c);
824
825 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, csc_matrix);
826 vl_compositor_set_csc_matrix(c, csc_matrix);
827
828 c->clear_color.f[0] = c->clear_color.f[1] = 0.0f;
829 c->clear_color.f[2] = c->clear_color.f[3] = 0.0f;
830
831 return true;
832 }