1 /**************************************************************************
3 * Copyright 2009 Younes Manton.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vl_compositor.h"
30 #include <pipe/p_context.h>
31 #include <util/u_inlines.h>
32 #include <util/u_memory.h>
33 #include <tgsi/tgsi_ureg.h>
36 struct vertex_shader_consts
38 struct vertex4f dst_scale
;
39 struct vertex4f dst_trans
;
40 struct vertex4f src_scale
;
41 struct vertex4f src_trans
;
/* Fragment shader constant buffer layout. Holds at least a `matrix` member
 * (see `fsc.matrix` passed to vl_csc_get_matrix / vl_compositor_set_csc_matrix
 * in init_buffers). NOTE(review): field list elided in this view — do not
 * assume layout. */
44 struct fragment_shader_consts
/* Compares two video rectangles for equality. NOTE(review): body elided in
 * this view; callers (vl_compositor_set_background / set_layers) pass a
 * possibly-NULL second argument — presumably handled inside; confirm against
 * the full source. */
50 u_video_rects_equal(struct pipe_video_rect
*a
, struct pipe_video_rect
*b
)
67 create_vert_shader(struct vl_compositor
*c
)
69 struct ureg_program
*shader
;
70 struct ureg_src vpos
, vtex
;
71 struct ureg_dst o_vpos
, o_vtex
;
73 shader
= ureg_create(TGSI_PROCESSOR_VERTEX
);
77 vpos
= ureg_DECL_vs_input(shader
, 0);
78 vtex
= ureg_DECL_vs_input(shader
, 1);
79 o_vpos
= ureg_DECL_output(shader
, TGSI_SEMANTIC_POSITION
, 0);
80 o_vtex
= ureg_DECL_output(shader
, TGSI_SEMANTIC_GENERIC
, 1);
86 ureg_MOV(shader
, o_vpos
, vpos
);
87 ureg_MOV(shader
, o_vtex
, vtex
);
91 c
->vertex_shader
= ureg_create_shader_and_destroy(shader
, c
->pipe
);
92 if (!c
->vertex_shader
)
99 create_frag_shader(struct vl_compositor
*c
)
101 struct ureg_program
*shader
;
103 struct ureg_src csc
[4];
104 struct ureg_src sampler
;
105 struct ureg_dst texel
;
106 struct ureg_dst fragment
;
109 shader
= ureg_create(TGSI_PROCESSOR_FRAGMENT
);
113 tc
= ureg_DECL_fs_input(shader
, TGSI_SEMANTIC_GENERIC
, 1, TGSI_INTERPOLATE_LINEAR
);
114 for (i
= 0; i
< 4; ++i
)
115 csc
[i
] = ureg_DECL_constant(shader
, i
);
116 sampler
= ureg_DECL_sampler(shader
, 0);
117 texel
= ureg_DECL_temporary(shader
);
118 fragment
= ureg_DECL_output(shader
, TGSI_SEMANTIC_COLOR
, 0);
121 * texel = tex(tc, sampler)
122 * fragment = csc * texel
124 ureg_TEX(shader
, texel
, TGSI_TEXTURE_2D
, tc
, sampler
);
125 for (i
= 0; i
< 4; ++i
)
126 ureg_DP4(shader
, ureg_writemask(fragment
, TGSI_WRITEMASK_X
<< i
), csc
[i
], ureg_src(texel
));
128 ureg_release_temporary(shader
, texel
);
131 c
->fragment_shader
= ureg_create_shader_and_destroy(shader
, c
->pipe
);
132 if (!c
->fragment_shader
)
139 init_pipe_state(struct vl_compositor
*c
)
141 struct pipe_sampler_state sampler
;
145 c
->fb_state
.nr_cbufs
= 1;
146 c
->fb_state
.zsbuf
= NULL
;
148 sampler
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
149 sampler
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
150 sampler
.wrap_r
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
151 sampler
.min_img_filter
= PIPE_TEX_FILTER_LINEAR
;
152 sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
153 sampler
.mag_img_filter
= PIPE_TEX_FILTER_LINEAR
;
154 sampler
.compare_mode
= PIPE_TEX_COMPARE_NONE
;
155 sampler
.compare_func
= PIPE_FUNC_ALWAYS
;
156 sampler
.normalized_coords
= 1;
157 /*sampler.lod_bias = ;*/
158 /*sampler.min_lod = ;*/
159 /*sampler.max_lod = ;*/
160 /*sampler.border_color[i] = ;*/
161 /*sampler.max_anisotropy = ;*/
162 c
->sampler
= c
->pipe
->create_sampler_state(c
->pipe
, &sampler
);
167 static void cleanup_pipe_state(struct vl_compositor
*c
)
171 c
->pipe
->delete_sampler_state(c
->pipe
, c
->sampler
);
175 init_shaders(struct vl_compositor
*c
)
179 create_vert_shader(c
);
180 create_frag_shader(c
);
185 static void cleanup_shaders(struct vl_compositor
*c
)
189 c
->pipe
->delete_vs_state(c
->pipe
, c
->vertex_shader
);
190 c
->pipe
->delete_fs_state(c
->pipe
, c
->fragment_shader
);
194 init_buffers(struct vl_compositor
*c
)
196 struct fragment_shader_consts fsc
;
197 struct pipe_vertex_element vertex_elems
[2];
202 * Create our vertex buffer and vertex buffer elements
204 c
->vertex_buf
.stride
= sizeof(struct vertex4f
);
205 c
->vertex_buf
.max_index
= (VL_COMPOSITOR_MAX_LAYERS
+ 2) * 6 - 1;
206 c
->vertex_buf
.buffer_offset
= 0;
207 /* XXX: Create with DYNAMIC or STREAM */
208 c
->vertex_buf
.buffer
= pipe_buffer_create
211 PIPE_BIND_VERTEX_BUFFER
,
212 sizeof(struct vertex4f
) * (VL_COMPOSITOR_MAX_LAYERS
+ 2) * 6
215 vertex_elems
[0].src_offset
= 0;
216 vertex_elems
[0].instance_divisor
= 0;
217 vertex_elems
[0].vertex_buffer_index
= 0;
218 vertex_elems
[0].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
219 vertex_elems
[1].src_offset
= sizeof(struct vertex2f
);
220 vertex_elems
[1].instance_divisor
= 0;
221 vertex_elems
[1].vertex_buffer_index
= 0;
222 vertex_elems
[1].src_format
= PIPE_FORMAT_R32G32_FLOAT
;
223 c
->vertex_elems_state
= c
->pipe
->create_vertex_elements_state(c
->pipe
, 2, vertex_elems
);
226 * Create our fragment shader's constant buffer
227 * Const buffer contains the color conversion matrix and bias vectors
229 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
230 c
->fs_const_buf
= pipe_buffer_create
233 PIPE_BIND_CONSTANT_BUFFER
,
234 sizeof(struct fragment_shader_consts
)
237 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY
, NULL
, true, fsc
.matrix
);
239 vl_compositor_set_csc_matrix(c
, fsc
.matrix
);
245 cleanup_buffers(struct vl_compositor
*c
)
249 c
->pipe
->delete_vertex_elements_state(c
->pipe
, c
->vertex_elems_state
);
250 pipe_resource_reference(&c
->vertex_buf
.buffer
, NULL
);
251 pipe_resource_reference(&c
->fs_const_buf
, NULL
);
254 bool vl_compositor_init(struct vl_compositor
*compositor
, struct pipe_context
*pipe
)
260 memset(compositor
, 0, sizeof(struct vl_compositor
));
262 compositor
->pipe
= pipe
;
264 if (!init_pipe_state(compositor
))
266 if (!init_shaders(compositor
)) {
267 cleanup_pipe_state(compositor
);
270 if (!init_buffers(compositor
)) {
271 cleanup_shaders(compositor
);
272 cleanup_pipe_state(compositor
);
276 compositor
->fb_state
.width
= 0;
277 compositor
->fb_state
.height
= 0;
278 compositor
->bg
= NULL
;
279 compositor
->dirty_bg
= false;
280 for (i
= 0; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
)
281 compositor
->layers
[i
] = NULL
;
282 compositor
->dirty_layers
= 0;
/* Tear down a compositor in reverse order of initialization. */
void vl_compositor_cleanup(struct vl_compositor *compositor)
{
   assert(compositor);

   cleanup_buffers(compositor);
   cleanup_shaders(compositor);
   cleanup_pipe_state(compositor);
}
296 void vl_compositor_set_background(struct vl_compositor
*compositor
,
297 struct pipe_surface
*bg
, struct pipe_video_rect
*bg_src_rect
)
300 assert((bg
&& bg_src_rect
) || (!bg
&& !bg_src_rect
));
302 if (compositor
->bg
!= bg
||
303 !u_video_rects_equal(&compositor
->bg_src_rect
, bg_src_rect
)) {
304 pipe_surface_reference(&compositor
->bg
, bg
);
305 /*if (!u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))*/
306 compositor
->bg_src_rect
= *bg_src_rect
;
307 compositor
->dirty_bg
= true;
311 void vl_compositor_set_layers(struct vl_compositor
*compositor
,
312 struct pipe_surface
*layers
[],
313 struct pipe_video_rect
*src_rects
[],
314 struct pipe_video_rect
*dst_rects
[],
320 assert(num_layers
<= VL_COMPOSITOR_MAX_LAYERS
);
322 for (i
= 0; i
< num_layers
; ++i
)
324 assert((layers
[i
] && src_rects
[i
] && dst_rects
[i
]) ||
325 (!layers
[i
] && !src_rects
[i
] && !dst_rects
[i
]));
327 if (compositor
->layers
[i
] != layers
[i
] ||
328 !u_video_rects_equal(&compositor
->layer_src_rects
[i
], src_rects
[i
]) ||
329 !u_video_rects_equal(&compositor
->layer_dst_rects
[i
], dst_rects
[i
]))
331 pipe_surface_reference(&compositor
->layers
[i
], layers
[i
]);
332 /*if (!u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]))*/
333 compositor
->layer_src_rects
[i
] = *src_rects
[i
];
334 /*if (!u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))*/
335 compositor
->layer_dst_rects
[i
] = *dst_rects
[i
];
336 compositor
->dirty_layers
|= 1 << i
;
340 for (; i
< VL_COMPOSITOR_MAX_LAYERS
; ++i
)
341 pipe_surface_reference(&compositor
->layers
[i
], NULL
);
344 static void gen_rect_verts(unsigned pos
,
345 struct pipe_video_rect
*src_rect
,
346 struct vertex2f
*src_inv_size
,
347 struct pipe_video_rect
*dst_rect
,
348 struct vertex2f
*dst_inv_size
,
351 assert(pos
< VL_COMPOSITOR_MAX_LAYERS
+ 2);
353 assert(src_inv_size
);
354 assert((dst_rect
&& dst_inv_size
) /*|| (!dst_rect && !dst_inv_size)*/);
357 vb
[pos
* 6 + 0].x
= dst_rect
->x
* dst_inv_size
->x
;
358 vb
[pos
* 6 + 0].y
= dst_rect
->y
* dst_inv_size
->y
;
359 vb
[pos
* 6 + 0].z
= src_rect
->x
* src_inv_size
->x
;
360 vb
[pos
* 6 + 0].w
= src_rect
->y
* src_inv_size
->y
;
362 vb
[pos
* 6 + 1].x
= dst_rect
->x
* dst_inv_size
->x
;
363 vb
[pos
* 6 + 1].y
= (dst_rect
->y
+ dst_rect
->h
) * dst_inv_size
->y
;
364 vb
[pos
* 6 + 1].z
= src_rect
->x
* src_inv_size
->x
;
365 vb
[pos
* 6 + 1].w
= (src_rect
->y
+ src_rect
->h
) * src_inv_size
->y
;
367 vb
[pos
* 6 + 2].x
= (dst_rect
->x
+ dst_rect
->w
) * dst_inv_size
->x
;
368 vb
[pos
* 6 + 2].y
= dst_rect
->y
* dst_inv_size
->y
;
369 vb
[pos
* 6 + 2].z
= (src_rect
->x
+ src_rect
->w
) * src_inv_size
->x
;
370 vb
[pos
* 6 + 2].w
= src_rect
->y
* src_inv_size
->y
;
372 vb
[pos
* 6 + 3].x
= (dst_rect
->x
+ dst_rect
->w
) * dst_inv_size
->x
;
373 vb
[pos
* 6 + 3].y
= dst_rect
->y
* dst_inv_size
->y
;
374 vb
[pos
* 6 + 3].z
= (src_rect
->x
+ src_rect
->w
) * src_inv_size
->x
;
375 vb
[pos
* 6 + 3].w
= src_rect
->y
* src_inv_size
->y
;
377 vb
[pos
* 6 + 4].x
= dst_rect
->x
* dst_inv_size
->x
;
378 vb
[pos
* 6 + 4].y
= (dst_rect
->y
+ dst_rect
->h
) * dst_inv_size
->y
;
379 vb
[pos
* 6 + 4].z
= src_rect
->x
* src_inv_size
->x
;
380 vb
[pos
* 6 + 4].w
= (src_rect
->y
+ src_rect
->h
) * src_inv_size
->y
;
382 vb
[pos
* 6 + 5].x
= (dst_rect
->x
+ dst_rect
->w
) * dst_inv_size
->x
;
383 vb
[pos
* 6 + 5].y
= (dst_rect
->y
+ dst_rect
->h
) * dst_inv_size
->y
;
384 vb
[pos
* 6 + 5].z
= (src_rect
->x
+ src_rect
->w
) * src_inv_size
->x
;
385 vb
[pos
* 6 + 5].w
= (src_rect
->y
+ src_rect
->h
) * src_inv_size
->y
;
388 static unsigned gen_data(struct vl_compositor
*c
,
389 struct pipe_surface
*src_surface
,
390 struct pipe_video_rect
*src_rect
,
391 struct pipe_video_rect
*dst_rect
,
392 struct pipe_surface
**textures
)
395 struct pipe_transfer
*buf_transfer
;
396 unsigned num_rects
= 0;
405 vb
= pipe_buffer_map(c
->pipe
, c
->vertex_buf
.buffer
,
406 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
413 struct vertex2f bg_inv_size
= {1.0f
/ c
->bg
->width
, 1.0f
/ c
->bg
->height
};
414 gen_rect_verts(num_rects
, &c
->bg_src_rect
, &bg_inv_size
, NULL
, NULL
, vb
);
415 textures
[num_rects
] = c
->bg
;
421 struct vertex2f src_inv_size
= { 1.0f
/ src_surface
->width
, 1.0f
/ src_surface
->height
};
422 gen_rect_verts(num_rects
, src_rect
, &src_inv_size
, dst_rect
, &c
->fb_inv_size
, vb
);
423 textures
[num_rects
] = src_surface
;
427 for (i
= 0; c
->dirty_layers
> 0; i
++) {
428 assert(i
< VL_COMPOSITOR_MAX_LAYERS
);
430 if (c
->dirty_layers
& (1 << i
)) {
431 struct vertex2f layer_inv_size
= {1.0f
/ c
->layers
[i
]->width
, 1.0f
/ c
->layers
[i
]->height
};
432 gen_rect_verts(num_rects
, &c
->layer_src_rects
[i
], &layer_inv_size
,
433 &c
->layer_dst_rects
[i
], &c
->fb_inv_size
, vb
);
434 textures
[num_rects
] = c
->layers
[i
];
436 c
->dirty_layers
&= ~(1 << i
);
440 pipe_buffer_unmap(c
->pipe
, c
->vertex_buf
.buffer
, buf_transfer
);
445 static void draw_layers(struct vl_compositor
*c
,
446 struct pipe_surface
*src_surface
,
447 struct pipe_video_rect
*src_rect
,
448 struct pipe_video_rect
*dst_rect
)
451 struct pipe_surface
*src_surfaces
[VL_COMPOSITOR_MAX_LAYERS
+ 2];
459 num_rects
= gen_data(c
, src_surface
, src_rect
, dst_rect
, src_surfaces
);
461 for (i
= 0; i
< num_rects
; ++i
) {
462 //c->pipe->set_fragment_sampler_views(c->pipe, 1, &src_surfaces[i]->texture);
463 c
->pipe
->draw_arrays(c
->pipe
, PIPE_PRIM_TRIANGLES
, i
* 6, 6);
467 void vl_compositor_render(struct vl_compositor
*compositor
,
468 struct pipe_surface
*src_surface
,
469 enum pipe_mpeg12_picture_type picture_type
,
470 /*unsigned num_past_surfaces,
471 struct pipe_surface *past_surfaces,
472 unsigned num_future_surfaces,
473 struct pipe_surface *future_surfaces,*/
474 struct pipe_video_rect
*src_area
,
475 struct pipe_surface
*dst_surface
,
476 struct pipe_video_rect
*dst_area
,
477 struct pipe_fence_handle
**fence
)
484 assert(picture_type
== PIPE_MPEG12_PICTURE_TYPE_FRAME
);
486 if (compositor
->fb_state
.width
!= dst_surface
->width
) {
487 compositor
->fb_inv_size
.x
= 1.0f
/ dst_surface
->width
;
488 compositor
->fb_state
.width
= dst_surface
->width
;
490 if (compositor
->fb_state
.height
!= dst_surface
->height
) {
491 compositor
->fb_inv_size
.y
= 1.0f
/ dst_surface
->height
;
492 compositor
->fb_state
.height
= dst_surface
->height
;
495 compositor
->fb_state
.cbufs
[0] = dst_surface
;
497 compositor
->viewport
.scale
[0] = compositor
->fb_state
.width
;
498 compositor
->viewport
.scale
[1] = compositor
->fb_state
.height
;
499 compositor
->viewport
.scale
[2] = 1;
500 compositor
->viewport
.scale
[3] = 1;
501 compositor
->viewport
.translate
[0] = 0;
502 compositor
->viewport
.translate
[1] = 0;
503 compositor
->viewport
.translate
[2] = 0;
504 compositor
->viewport
.translate
[3] = 0;
506 compositor
->pipe
->set_framebuffer_state(compositor
->pipe
, &compositor
->fb_state
);
507 compositor
->pipe
->set_viewport_state(compositor
->pipe
, &compositor
->viewport
);
508 compositor
->pipe
->bind_fragment_sampler_states(compositor
->pipe
, 1, &compositor
->sampler
);
509 compositor
->pipe
->bind_vs_state(compositor
->pipe
, compositor
->vertex_shader
);
510 compositor
->pipe
->bind_fs_state(compositor
->pipe
, compositor
->fragment_shader
);
511 compositor
->pipe
->set_vertex_buffers(compositor
->pipe
, 1, &compositor
->vertex_buf
);
512 compositor
->pipe
->bind_vertex_elements_state(compositor
->pipe
, compositor
->vertex_elems_state
);
513 compositor
->pipe
->set_constant_buffer(compositor
->pipe
, PIPE_SHADER_FRAGMENT
, 0, compositor
->fs_const_buf
);
515 draw_layers(compositor
, src_surface
, src_area
, dst_area
);
517 assert(!compositor
->dirty_bg
&& !compositor
->dirty_layers
);
518 compositor
->pipe
->flush(compositor
->pipe
, PIPE_FLUSH_RENDER_CACHE
, fence
);
521 void vl_compositor_set_csc_matrix(struct vl_compositor
*compositor
, const float *mat
)
523 struct pipe_transfer
*buf_transfer
;
529 pipe_buffer_map(compositor
->pipe
, compositor
->fs_const_buf
,
530 PIPE_TRANSFER_WRITE
| PIPE_TRANSFER_DISCARD
,
533 sizeof(struct fragment_shader_consts
)
536 pipe_buffer_unmap(compositor
->pipe
, compositor
->fs_const_buf
,