1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
#include <assert.h>

#include "pipe/p_compiler.h"
#include "pipe/p_context.h"

#include "util/u_memory.h"
#include "util/u_draw.h"
#include "util/u_surface.h"

#include "tgsi/tgsi_ureg.h"

#include "vl_compositor.h"
/* Sentinel extents for the dirty-area rectangle.  An "empty" dirty area is
 * stored inverted (x0/y0 = MAX_DIRTY, x1/y1 = MIN_DIRTY) so that MIN2/MAX2
 * accumulation in draw_layers() works without a separate "valid" flag.
 * MIN_DIRTY restored here — it is referenced by gen_vertex_data(),
 * vl_compositor_reset_dirty_area() and vl_compositor_render() below. */
#define MIN_DIRTY (0)
#define MAX_DIRTY (1 << 15)
/* A color-space-conversion matrix: 16 floats, used as three DP4 row
 * constants plus bias when converting YCbCr texels to RGB. */
typedef float csc_matrix[16];
57 create_vert_shader(struct vl_compositor
*c
)
59 struct ureg_program
*shader
;
60 struct ureg_src vpos
, vtex
;
62 struct ureg_dst o_vpos
, o_vtex
;
63 struct ureg_dst o_vtop
, o_vbottom
;
65 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
69 vpos
= ureg_DECL_vs_input(shader
, 0);
70 vtex
= ureg_DECL_vs_input(shader
, 1);
71 tmp
= ureg_DECL_temporary(shader
);
72 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, VS_O_VPOS
);
73 o_vtex
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
);
74 o_vtop
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
);
75 o_vbottom
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
);
81 ureg_MOV(shader
, o_vpos
, vpos
);
82 ureg_MOV(shader
, o_vtex
, vtex
);
84 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_X
),
85 ureg_scalar(vtex
, TGSI_SWIZZLE_W
), ureg_imm1f(shader
, 0.5f
));
86 ureg_MUL(shader
, ureg_writemask(tmp
, TGSI_WRITEMASK_Y
),
87 ureg_scalar(vtex
, TGSI_SWIZZLE_W
), ureg_imm1f(shader
, 0.25f
));
89 ureg_MOV(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_X
), vtex
);
90 ureg_MAD(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_Y
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
91 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
), ureg_imm1f(shader
, 0.25f
));
92 ureg_MAD(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_Z
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
93 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, 0.25f
));
94 ureg_RCP(shader
, ureg_writemask(o_vtop
, TGSI_WRITEMASK_W
),
95 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
));
97 ureg_MOV(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_X
), vtex
);
98 ureg_MAD(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_Y
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
99 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_X
), ureg_imm1f(shader
, -0.25f
));
100 ureg_MAD(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_Z
), ureg_scalar(vtex
, TGSI_SWIZZLE_Y
),
101 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
), ureg_imm1f(shader
, -0.25f
));
102 ureg_RCP(shader
, ureg_writemask(o_vbottom
, TGSI_WRITEMASK_W
),
103 ureg_scalar(ureg_src(tmp
), TGSI_SWIZZLE_Y
));
107 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
111 create_frag_shader_video_buffer(struct vl_compositor
*c
)
113 struct ureg_program
*shader
;
115 struct ureg_src csc
[3];
116 struct ureg_src sampler
[3];
117 struct ureg_dst texel
;
118 struct ureg_dst fragment
;
121 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
125 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, 1, TGSI_INTERPOLATE_LINEAR
);
126 for (i
= 0; i
< 3; ++i
) {
127 csc
[i
] = ureg_DECL_constant(shader
, i
);
128 sampler
[i
] = ureg_DECL_sampler(shader
, i
);
130 texel
= ureg_DECL_temporary(shader
);
131 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
134 * texel.xyz = tex(tc, sampler[i])
135 * fragment = csc * texel
137 for (i
= 0; i
< 3; ++i
)
138 ureg_TEX(shader
, ureg_writemask(texel
, TGSI_WRITEMASK_X
<< i
), TGSI_TEXTURE_3D
, tc
, sampler
[i
]);
140 ureg_MOV(shader
, ureg_writemask(texel
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
142 for (i
= 0; i
< 3; ++i
)
143 ureg_DP4(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
], ureg_src(texel
));
145 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
147 ureg_release_temporary(shader
, texel
);
150 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
154 create_frag_shader_weave(struct vl_compositor
*c
)
156 struct ureg_program
*shader
;
157 struct ureg_src i_tc
[2];
158 struct ureg_src csc
[3];
159 struct ureg_src sampler
[3];
160 struct ureg_dst t_tc
[2];
161 struct ureg_dst t_texel
[2];
162 struct ureg_dst o_fragment
;
165 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
169 i_tc
[0] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTOP
, TGSI_INTERPOLATE_LINEAR
);
170 i_tc
[1] = ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VBOTTOM
, TGSI_INTERPOLATE_LINEAR
);
172 for (i
= 0; i
< 3; ++i
) {
173 csc
[i
] = ureg_DECL_constant(shader
, i
);
174 sampler
[i
] = ureg_DECL_sampler(shader
, i
);
177 for (i
= 0; i
< 2; ++i
) {
178 t_tc
[i
] = ureg_DECL_temporary(shader
);
179 t_texel
[i
] = ureg_DECL_temporary(shader
);
181 o_fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
183 /* calculate the texture offsets
185 * t_tc.y = (round(i_tc.y) + 0.5) / height * 2
187 for (i
= 0; i
< 2; ++i
) {
188 ureg_MOV(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_X
), i_tc
[i
]);
189 ureg_ROUND(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_YZ
), i_tc
[i
]);
190 ureg_MOV(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_W
),
191 ureg_imm1f(shader
, i
? 0.75f
: 0.25f
));
192 ureg_ADD(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_YZ
),
193 ureg_src(t_tc
[i
]), ureg_imm1f(shader
, 0.5f
));
194 ureg_MUL(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_Y
),
195 ureg_src(t_tc
[i
]), ureg_scalar(i_tc
[0], TGSI_SWIZZLE_W
));
196 ureg_MUL(shader
, ureg_writemask(t_tc
[i
], TGSI_WRITEMASK_Z
),
197 ureg_src(t_tc
[i
]), ureg_scalar(i_tc
[1], TGSI_SWIZZLE_W
));
201 * texel[0..1].x = tex(t_tc[0..1][0])
202 * texel[0..1].y = tex(t_tc[0..1][1])
203 * texel[0..1].z = tex(t_tc[0..1][2])
205 for (i
= 0; i
< 2; ++i
)
206 for (j
= 0; j
< 3; ++j
) {
207 struct ureg_src src
= ureg_swizzle(ureg_src(t_tc
[i
]),
208 TGSI_SWIZZLE_X
, j
? TGSI_SWIZZLE_Z
: TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
);
210 ureg_TEX(shader
, ureg_writemask(t_texel
[i
], TGSI_WRITEMASK_X
<< j
),
211 TGSI_TEXTURE_3D
, src
, sampler
[j
]);
214 /* calculate linear interpolation factor
215 * factor = |round(i_tc.y) - i_tc.y| * 2
217 ureg_ROUND(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_YZ
), i_tc
[0]);
218 ureg_ADD(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_YZ
),
219 ureg_src(t_tc
[0]), ureg_negate(i_tc
[0]));
220 ureg_MUL(shader
, ureg_writemask(t_tc
[0], TGSI_WRITEMASK_XY
),
221 ureg_abs(ureg_src(t_tc
[0])), ureg_imm1f(shader
, 2.0f
));
222 ureg_LRP(shader
, t_texel
[0], ureg_swizzle(ureg_src(t_tc
[0]),
223 TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Z
),
224 ureg_src(t_texel
[1]), ureg_src(t_texel
[0]));
226 /* and finally do colour space transformation
227 * fragment = csc * texel
229 ureg_MOV(shader
, ureg_writemask(t_texel
[0], TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
230 for (i
= 0; i
< 3; ++i
)
231 ureg_DP4(shader
, ureg_writemask(o_fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
], ureg_src(t_texel
[0]));
233 ureg_MOV(shader
, ureg_writemask(o_fragment
, TGSI_WRITEMASK_W
), ureg_imm1f(shader
, 1.0f
));
235 for (i
= 0; i
< 2; ++i
) {
236 ureg_release_temporary(shader
, t_texel
[i
]);
237 ureg_release_temporary(shader
, t_tc
[i
]);
242 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
246 create_frag_shader_palette(struct vl_compositor
*c
, bool include_cc
)
248 struct ureg_program
*shader
;
249 struct ureg_src csc
[3];
251 struct ureg_src sampler
;
252 struct ureg_src palette
;
253 struct ureg_dst texel
;
254 struct ureg_dst fragment
;
257 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
261 for (i
= 0; include_cc
&& i
< 3; ++i
)
262 csc
[i
] = ureg_DECL_constant(shader
, i
);
264 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
, TGSI_INTERPOLATE_LINEAR
);
265 sampler
= ureg_DECL_sampler(shader
, 0);
266 palette
= ureg_DECL_sampler(shader
, 1);
268 texel
= ureg_DECL_temporary(shader
);
269 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
272 * texel = tex(tc, sampler)
273 * fragment.xyz = tex(texel, palette) * csc
274 * fragment.a = texel.a
276 ureg_TEX(shader
, texel
, TGSI_TEXTURE_2D
, tc
, sampler
);
277 ureg_MOV(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_W
), ureg_src(texel
));
280 ureg_TEX(shader
, texel
, TGSI_TEXTURE_1D
, ureg_src(texel
), palette
);
281 for (i
= 0; i
< 3; ++i
)
282 ureg_DP4(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
], ureg_src(texel
));
284 ureg_TEX(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_XYZ
),
285 TGSI_TEXTURE_1D
, ureg_src(texel
), palette
);
288 ureg_release_temporary(shader
, texel
);
291 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
295 create_frag_shader_rgba(struct vl_compositor
*c
)
297 struct ureg_program
*shader
;
299 struct ureg_src sampler
;
300 struct ureg_dst fragment
;
302 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
306 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, VS_O_VTEX
, TGSI_INTERPOLATE_LINEAR
);
307 sampler
= ureg_DECL_sampler(shader
, 0);
308 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
311 * fragment = tex(tc, sampler)
313 ureg_TEX(shader
, fragment
, TGSI_TEXTURE_2D
, tc
, sampler
);
316 return ureg_create_shader_and_destroy(shader
, c
->pipe
);
320 init_shaders(struct vl_compositor
*c
)
324 c
->vs
= create_vert_shader(c
);
326 debug_printf("Unable to create vertex shader.\n");
330 c
->fs_video_buffer
= create_frag_shader_video_buffer(c
);
331 if (!c
->fs_video_buffer
) {
332 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
336 c
->fs_weave
= create_frag_shader_weave(c
);
338 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
342 c
->fs_palette
.yuv
= create_frag_shader_palette(c
, true);
343 if (!c
->fs_palette
.yuv
) {
344 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
348 c
->fs_palette
.rgb
= create_frag_shader_palette(c
, false);
349 if (!c
->fs_palette
.rgb
) {
350 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
354 c
->fs_rgba
= create_frag_shader_rgba(c
);
356 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
363 static void cleanup_shaders(struct vl_compositor
*c
)
367 c
->pipe
->delete_vs_state(c
->pipe
, c
->vs
);
368 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_video_buffer
);
369 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_weave
);
370 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_palette
.yuv
);
371 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_palette
.rgb
);
372 c
->pipe
->delete_fs_state(c
->pipe
, c
->fs_rgba
);
376 init_pipe_state(struct vl_compositor
*c
)
378 struct pipe_rasterizer_state rast
;
379 struct pipe_sampler_state sampler
;
380 struct pipe_blend_state blend
;
381 struct pipe_depth_stencil_alpha_state dsa
;
386 c
->fb_state
.nr_cbufs
= 1;
387 c
->fb_state
.zsbuf
= NULL
;
389 memset(&sampler
, 0, sizeof(sampler
));
390 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
391 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
392 sampler
.wrap_r
= PIPE_TEX_WRAP_REPEAT
;
393 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
394 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
395 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
396 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
397 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
398 sampler
.normalized_coords
= 1;
400 c
->sampler_linear
= c
->pipe
->create_sampler_state(c
->pipe
, &sampler
);
402 sampler
.min_img_filter
= PIPE_TEX_FILTER_NEAREST
;
403 sampler
.mag_img_filter
= PIPE_TEX_FILTER_NEAREST
;
404 c
->sampler_nearest
= c
->pipe
->create_sampler_state(c
->pipe
, &sampler
);
406 memset(&blend
, 0, sizeof blend
);
407 blend
.independent_blend_enable
= 0;
408 blend
.rt
[0].blend_enable
= 0;
409 blend
.logicop_enable
= 0;
410 blend
.logicop_func
= PIPE_LOGICOP_CLEAR
;
411 blend
.rt
[0].colormask
= PIPE_MASK_RGBA
;
413 c
->blend_clear
= c
->pipe
->create_blend_state(c
->pipe
, &blend
);
415 blend
.rt
[0].blend_enable
= 1;
416 blend
.rt
[0].rgb_func
= PIPE_BLEND_ADD
;
417 blend
.rt
[0].rgb_src_factor
= PIPE_BLENDFACTOR_SRC_ALPHA
;
418 blend
.rt
[0].rgb_dst_factor
= PIPE_BLENDFACTOR_INV_SRC_ALPHA
;
419 blend
.rt
[0].alpha_func
= PIPE_BLEND_ADD
;
420 blend
.rt
[0].alpha_src_factor
= PIPE_BLENDFACTOR_ONE
;
421 blend
.rt
[0].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
422 c
->blend_add
= c
->pipe
->create_blend_state(c
->pipe
, &blend
);
424 memset(&rast
, 0, sizeof rast
);
427 rast
.cull_face
= PIPE_FACE_NONE
;
428 rast
.fill_back
= PIPE_POLYGON_MODE_FILL
;
429 rast
.fill_front
= PIPE_POLYGON_MODE_FILL
;
432 rast
.point_size_per_vertex
= 1;
433 rast
.offset_units
= 1;
434 rast
.offset_scale
= 1;
435 rast
.gl_rasterization_rules
= 1;
438 c
->rast
= c
->pipe
->create_rasterizer_state(c
->pipe
, &rast
);
440 memset(&dsa
, 0, sizeof dsa
);
441 dsa
.depth
.enabled
= 0;
442 dsa
.depth
.writemask
= 0;
443 dsa
.depth
.func
= PIPE_FUNC_ALWAYS
;
444 for (i
= 0; i
< 2; ++i
) {
445 dsa
.stencil
[i
].enabled
= 0;
446 dsa
.stencil
[i
].func
= PIPE_FUNC_ALWAYS
;
447 dsa
.stencil
[i
].fail_op
= PIPE_STENCIL_OP_KEEP
;
448 dsa
.stencil
[i
].zpass_op
= PIPE_STENCIL_OP_KEEP
;
449 dsa
.stencil
[i
].zfail_op
= PIPE_STENCIL_OP_KEEP
;
450 dsa
.stencil
[i
].valuemask
= 0;
451 dsa
.stencil
[i
].writemask
= 0;
453 dsa
.alpha
.enabled
= 0;
454 dsa
.alpha
.func
= PIPE_FUNC_ALWAYS
;
455 dsa
.alpha
.ref_value
= 0;
456 c
->dsa
= c
->pipe
->create_depth_stencil_alpha_state(c
->pipe
, &dsa
);
457 c
->pipe
->bind_depth_stencil_alpha_state(c
->pipe
, c
->dsa
);
462 static void cleanup_pipe_state(struct vl_compositor
*c
)
466 /* Asserted in softpipe_delete_fs_state() for some reason */
467 c
->pipe
->bind_vs_state(c
->pipe
, NULL
);
468 c
->pipe
->bind_fs_state(c
->pipe
, NULL
);
470 c
->pipe
->delete_depth_stencil_alpha_state(c
->pipe
, c
->dsa
);
471 c
->pipe
->delete_sampler_state(c
->pipe
, c
->sampler_linear
);
472 c
->pipe
->delete_sampler_state(c
->pipe
, c
->sampler_nearest
);
473 c
->pipe
->delete_blend_state(c
->pipe
, c
->blend_clear
);
474 c
->pipe
->delete_blend_state(c
->pipe
, c
->blend_add
);
475 c
->pipe
->delete_rasterizer_state(c
->pipe
, c
->rast
);
479 create_vertex_buffer(struct vl_compositor
*c
)
483 pipe_resource_reference(&c
->vertex_buf
.buffer
, NULL
);
484 c
->vertex_buf
.buffer
= pipe_buffer_create
487 PIPE_BIND_VERTEX_BUFFER
,
489 c
->vertex_buf
.stride
* VL_COMPOSITOR_MAX_LAYERS
* 4
492 return c
->vertex_buf
.buffer
!= NULL
;
496 init_buffers(struct vl_compositor
*c
)
498 struct pipe_vertex_element vertex_elems
[2];
503 * Create our vertex buffer and vertex buffer elements
505 c
->vertex_buf
.stride
= sizeof(struct vertex2f
) + sizeof(struct vertex4f
);
506 c
->vertex_buf
.buffer_offset
= 0;
507 create_vertex_buffer(c
);
509 vertex_elems
[0].src_offset
= 0;
510 vertex_elems
[0].instance_divisor
= 0;
511 vertex_elems
[0].vertex_buffer_index
= 0;
512 vertex_elems
[0].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
513 vertex_elems
[1].src_offset
= sizeof(struct vertex2f
);
514 vertex_elems
[1].instance_divisor
= 0;
515 vertex_elems
[1].vertex_buffer_index
= 0;
516 vertex_elems
[1].src_format
= PIPE_FORMAT_R32G32B32A32_FLOAT
;
517 c
->vertex_elems_state
= c
->pipe
->create_vertex_elements_state(c
->pipe
, 2, vertex_elems
);
523 cleanup_buffers(struct vl_compositor
*c
)
527 c
->pipe
->delete_vertex_elements_state(c
->pipe
, c
->vertex_elems_state
);
528 pipe_resource_reference(&c
->vertex_buf
.buffer
, NULL
);
531 static INLINE
struct u_rect
532 default_rect(struct vl_compositor_layer
*layer
)
534 struct pipe_resource
*res
= layer
->sampler_views
[0]->texture
;
535 struct u_rect rect
= { 0, res
->width0
, 0, res
->height0
* res
->depth0
};
539 static INLINE
struct vertex2f
540 calc_topleft(struct vertex2f size
, struct u_rect rect
)
542 struct vertex2f res
= { rect
.x0
/ size
.x
, rect
.y0
/ size
.y
};
546 static INLINE
struct vertex2f
547 calc_bottomright(struct vertex2f size
, struct u_rect rect
)
549 struct vertex2f res
= { rect
.x1
/ size
.x
, rect
.y1
/ size
.y
};
554 calc_src_and_dst(struct vl_compositor_layer
*layer
, unsigned width
, unsigned height
,
555 struct u_rect src
, struct u_rect dst
)
557 struct vertex2f size
= { width
, height
};
559 layer
->src
.tl
= calc_topleft(size
, src
);
560 layer
->src
.br
= calc_bottomright(size
, src
);
561 layer
->dst
.tl
= calc_topleft(size
, dst
);
562 layer
->dst
.br
= calc_bottomright(size
, dst
);
564 layer
->zw
.y
= size
.y
;
568 gen_rect_verts(struct vertex2f
*vb
, struct vl_compositor_layer
*layer
)
572 vb
[ 0].x
= layer
->dst
.tl
.x
;
573 vb
[ 0].y
= layer
->dst
.tl
.y
;
574 vb
[ 1].x
= layer
->src
.tl
.x
;
575 vb
[ 1].y
= layer
->src
.tl
.y
;
578 vb
[ 3].x
= layer
->dst
.br
.x
;
579 vb
[ 3].y
= layer
->dst
.tl
.y
;
580 vb
[ 4].x
= layer
->src
.br
.x
;
581 vb
[ 4].y
= layer
->src
.tl
.y
;
584 vb
[ 6].x
= layer
->dst
.br
.x
;
585 vb
[ 6].y
= layer
->dst
.br
.y
;
586 vb
[ 7].x
= layer
->src
.br
.x
;
587 vb
[ 7].y
= layer
->src
.br
.y
;
590 vb
[ 9].x
= layer
->dst
.tl
.x
;
591 vb
[ 9].y
= layer
->dst
.br
.y
;
592 vb
[10].x
= layer
->src
.tl
.x
;
593 vb
[10].y
= layer
->src
.br
.y
;
597 static INLINE
struct u_rect
598 calc_drawn_area(struct vl_compositor_state
*s
, struct vl_compositor_layer
*layer
)
600 struct u_rect result
;
603 result
.x0
= layer
->dst
.tl
.x
* s
->viewport
.scale
[0] + s
->viewport
.translate
[0];
604 result
.y0
= layer
->dst
.tl
.y
* s
->viewport
.scale
[1] + s
->viewport
.translate
[1];
605 result
.x1
= layer
->dst
.br
.x
* s
->viewport
.scale
[0] + s
->viewport
.translate
[0];
606 result
.y1
= layer
->dst
.br
.y
* s
->viewport
.scale
[1] + s
->viewport
.translate
[1];
609 result
.x0
= MAX2(result
.x0
, s
->scissor
.minx
);
610 result
.y0
= MAX2(result
.y0
, s
->scissor
.miny
);
611 result
.x1
= MIN2(result
.x1
, s
->scissor
.maxx
);
612 result
.y1
= MIN2(result
.y1
, s
->scissor
.maxy
);
617 gen_vertex_data(struct vl_compositor
*c
, struct vl_compositor_state
*s
, struct u_rect
*dirty
)
620 struct pipe_transfer
*buf_transfer
;
625 vb
= pipe_buffer_map(c
->pipe
, c
->vertex_buf
.buffer
,
626 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
| PIPE_TRANSFER_DONTBLOCK
,
630 // If buffer is still locked from last draw create a new one
631 create_vertex_buffer(c
);
632 vb
= pipe_buffer_map(c
->pipe
, c
->vertex_buf
.buffer
,
633 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
,
637 for (i
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; i
++) {
638 if (s
->used_layers
& (1 << i
)) {
639 struct vl_compositor_layer
*layer
= &s
->layers
[i
];
640 gen_rect_verts(vb
, layer
);
643 if (dirty
&& layer
->clearing
) {
644 struct u_rect drawn
= calc_drawn_area(s
, layer
);
646 dirty
->x0
>= drawn
.x0
&&
647 dirty
->y0
>= drawn
.y0
&&
648 dirty
->x1
<= drawn
.x1
&&
649 dirty
->y1
<= drawn
.y1
) {
651 // We clear the dirty area anyway, no need for clear_render_target
652 dirty
->x0
= dirty
->y0
= MAX_DIRTY
;
653 dirty
->x1
= dirty
->y1
= MIN_DIRTY
;
659 pipe_buffer_unmap(c
->pipe
, buf_transfer
);
663 draw_layers(struct vl_compositor
*c
, struct vl_compositor_state
*s
, struct u_rect
*dirty
)
665 unsigned vb_index
, i
;
669 for (i
= 0, vb_index
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
) {
670 if (s
->used_layers
& (1 << i
)) {
671 struct vl_compositor_layer
*layer
= &s
->layers
[i
];
672 struct pipe_sampler_view
**samplers
= &layer
->sampler_views
[0];
673 unsigned num_sampler_views
= !samplers
[1] ? 1 : !samplers
[2] ? 2 : 3;
674 void *blend
= layer
->blend
? layer
->blend
: i
? c
->blend_add
: c
->blend_clear
;
676 c
->pipe
->bind_blend_state(c
->pipe
, blend
);
677 c
->pipe
->bind_fs_state(c
->pipe
, layer
->fs
);
678 c
->pipe
->bind_fragment_sampler_states(c
->pipe
, num_sampler_views
, layer
->samplers
);
679 c
->pipe
->set_fragment_sampler_views(c
->pipe
, num_sampler_views
, samplers
);
680 util_draw_arrays(c
->pipe
, PIPE_PRIM_QUADS
, vb_index
* 4, 4);
684 // Remember the currently drawn area as dirty for the next draw command
685 struct u_rect drawn
= calc_drawn_area(s
, layer
);
686 dirty
->x0
= MIN2(drawn
.x0
, dirty
->x0
);
687 dirty
->y0
= MIN2(drawn
.y0
, dirty
->y0
);
688 dirty
->x1
= MAX2(drawn
.x1
, dirty
->x1
);
689 dirty
->y1
= MAX2(drawn
.y1
, dirty
->y1
);
696 vl_compositor_reset_dirty_area(struct u_rect
*dirty
)
700 dirty
->x0
= dirty
->y0
= MIN_DIRTY
;
701 dirty
->x1
= dirty
->y1
= MAX_DIRTY
;
705 vl_compositor_set_clear_color(struct vl_compositor_state
*s
, union pipe_color_union
*color
)
710 s
->clear_color
= *color
;
714 vl_compositor_get_clear_color(struct vl_compositor_state
*s
, union pipe_color_union
*color
)
719 *color
= s
->clear_color
;
723 vl_compositor_clear_layers(struct vl_compositor_state
*s
)
730 for ( i
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
) {
731 s
->layers
[i
].clearing
= i
? false : true;
732 s
->layers
[i
].blend
= NULL
;
733 s
->layers
[i
].fs
= NULL
;
734 for ( j
= 0; j
< 3; j
++)
735 pipe_sampler_view_reference(&s
->layers
[i
].sampler_views
[j
], NULL
);
/* Tear down the compositor: buffers, shaders, then pipe state.
 * NOTE(review): the cleanup_buffers/cleanup_shaders calls were lost in
 * extraction and restored — verify the order against project history. */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
750 vl_compositor_set_csc_matrix(struct vl_compositor_state
*s
, const float matrix
[16])
752 struct pipe_transfer
*buf_transfer
;
758 pipe_buffer_map(s
->pipe
, s
->csc_matrix
,
759 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD_RANGE
,
765 pipe_buffer_unmap(s
->pipe
, buf_transfer
);
769 vl_compositor_set_dst_area(struct vl_compositor_state
*s
, struct u_rect
*dst_area
)
773 s
->viewport_valid
= dst_area
!= NULL
;
775 s
->viewport
.scale
[0] = dst_area
->x1
- dst_area
->x0
;
776 s
->viewport
.scale
[1] = dst_area
->y1
- dst_area
->y0
;
777 s
->viewport
.translate
[0] = dst_area
->x0
;
778 s
->viewport
.translate
[1] = dst_area
->y0
;
783 vl_compositor_set_dst_clip(struct vl_compositor_state
*s
, struct u_rect
*dst_clip
)
787 s
->scissor_valid
= dst_clip
!= NULL
;
789 s
->scissor
.minx
= dst_clip
->x0
;
790 s
->scissor
.miny
= dst_clip
->y0
;
791 s
->scissor
.maxx
= dst_clip
->x1
;
792 s
->scissor
.maxy
= dst_clip
->y1
;
797 vl_compositor_set_layer_blend(struct vl_compositor_state
*s
,
798 unsigned layer
, void *blend
,
803 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
805 s
->layers
[layer
].clearing
= is_clearing
;
806 s
->layers
[layer
].blend
= blend
;
810 vl_compositor_set_buffer_layer(struct vl_compositor_state
*s
,
811 struct vl_compositor
*c
,
813 struct pipe_video_buffer
*buffer
,
814 struct u_rect
*src_rect
,
815 struct u_rect
*dst_rect
,
816 enum vl_compositor_deinterlace deinterlace
)
818 struct pipe_sampler_view
**sampler_views
;
821 assert(s
&& c
&& buffer
);
823 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
825 s
->used_layers
|= 1 << layer
;
826 sampler_views
= buffer
->get_sampler_view_components(buffer
);
827 for (i
= 0; i
< 3; ++i
) {
828 s
->layers
[layer
].samplers
[i
] = c
->sampler_linear
;
829 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[i
], sampler_views
[i
]);
832 calc_src_and_dst(&s
->layers
[layer
], buffer
->width
, buffer
->height
,
833 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
834 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
836 if (buffer
->interlaced
) {
837 float half_a_line
= 0.5f
/ s
->layers
[layer
].zw
.y
;
838 switch(deinterlace
) {
839 case VL_COMPOSITOR_WEAVE
:
840 s
->layers
[layer
].fs
= c
->fs_weave
;
843 case VL_COMPOSITOR_BOB_TOP
:
844 s
->layers
[layer
].zw
.x
= 0.25f
;
845 s
->layers
[layer
].src
.tl
.y
+= half_a_line
;
846 s
->layers
[layer
].src
.br
.y
+= half_a_line
;
847 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
850 case VL_COMPOSITOR_BOB_BOTTOM
:
851 s
->layers
[layer
].zw
.x
= 0.75f
;
852 s
->layers
[layer
].src
.tl
.y
-= half_a_line
;
853 s
->layers
[layer
].src
.br
.y
-= half_a_line
;
854 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
859 s
->layers
[layer
].fs
= c
->fs_video_buffer
;
863 vl_compositor_set_palette_layer(struct vl_compositor_state
*s
,
864 struct vl_compositor
*c
,
866 struct pipe_sampler_view
*indexes
,
867 struct pipe_sampler_view
*palette
,
868 struct u_rect
*src_rect
,
869 struct u_rect
*dst_rect
,
870 bool include_color_conversion
)
872 assert(s
&& c
&& indexes
&& palette
);
874 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
876 s
->used_layers
|= 1 << layer
;
878 s
->layers
[layer
].fs
= include_color_conversion
?
879 c
->fs_palette
.yuv
: c
->fs_palette
.rgb
;
881 s
->layers
[layer
].samplers
[0] = c
->sampler_linear
;
882 s
->layers
[layer
].samplers
[1] = c
->sampler_nearest
;
883 s
->layers
[layer
].samplers
[2] = NULL
;
884 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[0], indexes
);
885 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[1], palette
);
886 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[2], NULL
);
887 calc_src_and_dst(&s
->layers
[layer
], indexes
->texture
->width0
, indexes
->texture
->height0
,
888 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
889 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
893 vl_compositor_set_rgba_layer(struct vl_compositor_state
*s
,
894 struct vl_compositor
*c
,
896 struct pipe_sampler_view
*rgba
,
897 struct u_rect
*src_rect
,
898 struct u_rect
*dst_rect
)
900 assert(s
&& c
&& rgba
);
902 assert(layer
< VL_COMPOSITOR_MAX_LAYERS
);
904 s
->used_layers
|= 1 << layer
;
905 s
->layers
[layer
].fs
= c
->fs_rgba
;
906 s
->layers
[layer
].samplers
[0] = c
->sampler_linear
;
907 s
->layers
[layer
].samplers
[1] = NULL
;
908 s
->layers
[layer
].samplers
[2] = NULL
;
909 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[0], rgba
);
910 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[1], NULL
);
911 pipe_sampler_view_reference(&s
->layers
[layer
].sampler_views
[2], NULL
);
912 calc_src_and_dst(&s
->layers
[layer
], rgba
->texture
->width0
, rgba
->texture
->height0
,
913 src_rect
? *src_rect
: default_rect(&s
->layers
[layer
]),
914 dst_rect
? *dst_rect
: default_rect(&s
->layers
[layer
]));
918 vl_compositor_render(struct vl_compositor_state
*s
,
919 struct vl_compositor
*c
,
920 struct pipe_surface
*dst_surface
,
921 struct u_rect
*dirty_area
)
926 c
->fb_state
.width
= dst_surface
->width
;
927 c
->fb_state
.height
= dst_surface
->height
;
928 c
->fb_state
.cbufs
[0] = dst_surface
;
930 if (!s
->viewport_valid
) {
931 s
->viewport
.scale
[0] = dst_surface
->width
;
932 s
->viewport
.scale
[1] = dst_surface
->height
;
933 s
->viewport
.translate
[0] = 0;
934 s
->viewport
.translate
[1] = 0;
937 if (!s
->scissor_valid
) {
940 s
->scissor
.maxx
= dst_surface
->width
;
941 s
->scissor
.maxy
= dst_surface
->height
;
944 gen_vertex_data(c
, s
, dirty_area
);
946 if (dirty_area
&& (dirty_area
->x0
< dirty_area
->x1
||
947 dirty_area
->y0
< dirty_area
->y1
)) {
949 c
->pipe
->clear_render_target(c
->pipe
, dst_surface
, &s
->clear_color
,
950 0, 0, dst_surface
->width
, dst_surface
->height
);
951 dirty_area
->x0
= dirty_area
->y0
= MAX_DIRTY
;
952 dirty_area
->x1
= dirty_area
->y1
= MIN_DIRTY
;
955 c
->pipe
->set_scissor_state(c
->pipe
, &s
->scissor
);
956 c
->pipe
->set_framebuffer_state(c
->pipe
, &c
->fb_state
);
957 c
->pipe
->set_viewport_state(c
->pipe
, &s
->viewport
);
958 c
->pipe
->bind_vs_state(c
->pipe
, c
->vs
);
959 c
->pipe
->set_vertex_buffers(c
->pipe
, 1, &c
->vertex_buf
);
960 c
->pipe
->bind_vertex_elements_state(c
->pipe
, c
->vertex_elems_state
);
961 c
->pipe
->set_constant_buffer(c
->pipe
, PIPE_SHADER_FRAGMENT
, 0, s
->csc_matrix
);
962 c
->pipe
->bind_rasterizer_state(c
->pipe
, c
->rast
);
964 draw_layers(c
, s
, dirty_area
);
968 vl_compositor_init(struct vl_compositor
*c
, struct pipe_context
*pipe
)
972 memset(c
, 0, sizeof(*c
));
976 if (!init_pipe_state(c
))
979 if (!init_shaders(c
)) {
980 cleanup_pipe_state(c
);
984 if (!init_buffers(c
)) {
986 cleanup_pipe_state(c
);
994 vl_compositor_init_state(struct vl_compositor_state
*s
, struct pipe_context
*pipe
)
996 csc_matrix csc_matrix
;
1000 memset(s
, 0, sizeof(*s
));
1004 s
->viewport
.scale
[2] = 1;
1005 s
->viewport
.scale
[3] = 1;
1006 s
->viewport
.translate
[2] = 0;
1007 s
->viewport
.translate
[3] = 0;
1009 s
->clear_color
.f
[0] = s
->clear_color
.f
[1] = 0.0f
;
1010 s
->clear_color
.f
[2] = s
->clear_color
.f
[3] = 0.0f
;
1013 * Create our fragment shader's constant buffer
1014 * Const buffer contains the color conversion matrix and bias vectors
1016 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
1017 s
->csc_matrix
= pipe_buffer_create
1020 PIPE_BIND_CONSTANT_BUFFER
,
1025 vl_compositor_clear_layers(s
);
1027 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY
, NULL
, true, csc_matrix
);
1028 vl_compositor_set_csc_matrix(s
, csc_matrix
);
1034 vl_compositor_cleanup_state(struct vl_compositor_state
*s
)
1038 vl_compositor_clear_layers(s
);
1039 pipe_resource_reference(&s
->csc_matrix
, NULL
);