1 /**********************************************************
2 * Copyright 2009-2011 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 *********************************************************
26 * Zack Rusin <zackr-at-vmware-dot-com>
27 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30 #include "xa_composite.h"
31 #include "xa_context.h"
33 #include "cso_cache/cso_context.h"
34 #include "util/u_sampler.h"
35 #include "util/u_inlines.h"
/* XXX: duplicated from Xrender.h, since including that header here breaks
 * compilation. Converts an X fixed-point (16.16) value to a double. */
#define XFixedToDouble(f) (((double) (f)) / 65536.)
/*
 * Describes how one Render compositing operator maps onto gallium blend
 * state: which of source/destination alpha the operator consumes, and the
 * RGB blend factors that implement it.
 */
struct xa_composite_blend {
    /* NOTE(review): 'op' field reconstructed -- it is referenced as
     * xa_blends[i].op in blend_for_op(); confirm the bitfield width. */
    unsigned op : 8;            /* enum xa_composite_op this entry implements */

    unsigned alpha_dst : 4;     /* nonzero if the op reads destination alpha */
    unsigned alpha_src : 4;     /* nonzero if the op reads source alpha */

    unsigned rgb_src : 8;       /**< PIPE_BLENDFACTOR_x */
    unsigned rgb_dst : 8;       /**< PIPE_BLENDFACTOR_x */
};
51 #define XA_BLEND_OP_OVER 3
52 static const struct xa_composite_blend xa_blends
[] = {
54 0, 0, PIPE_BLENDFACTOR_ZERO
, PIPE_BLENDFACTOR_ZERO
},
56 0, 0, PIPE_BLENDFACTOR_ONE
, PIPE_BLENDFACTOR_ZERO
},
58 0, 0, PIPE_BLENDFACTOR_ZERO
, PIPE_BLENDFACTOR_ONE
},
60 0, 1, PIPE_BLENDFACTOR_ONE
, PIPE_BLENDFACTOR_INV_SRC_ALPHA
},
62 1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA
, PIPE_BLENDFACTOR_ONE
},
64 1, 0, PIPE_BLENDFACTOR_DST_ALPHA
, PIPE_BLENDFACTOR_ZERO
},
66 0, 1, PIPE_BLENDFACTOR_ZERO
, PIPE_BLENDFACTOR_SRC_ALPHA
},
68 1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA
, PIPE_BLENDFACTOR_ZERO
},
70 0, 1, PIPE_BLENDFACTOR_ZERO
, PIPE_BLENDFACTOR_INV_SRC_ALPHA
},
72 1, 1, PIPE_BLENDFACTOR_DST_ALPHA
, PIPE_BLENDFACTOR_INV_SRC_ALPHA
},
74 1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA
, PIPE_BLENDFACTOR_SRC_ALPHA
},
76 1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA
, PIPE_BLENDFACTOR_INV_SRC_ALPHA
},
78 0, 0, PIPE_BLENDFACTOR_ONE
, PIPE_BLENDFACTOR_ONE
},
82 * The alpha value stored in a L8 texture is read by the
83 * hardware as color, and R8 is read as red. The source alpha value
84 * at the end of the fragment shader is stored in all color channels,
85 * so the correct approach is to blend using DST_COLOR instead of
86 * DST_ALPHA and then output any color channel (L8) or the red channel (R8).
89 xa_convert_blend_for_luminance(unsigned factor
)
92 case PIPE_BLENDFACTOR_DST_ALPHA
:
93 return PIPE_BLENDFACTOR_DST_COLOR
;
94 case PIPE_BLENDFACTOR_INV_DST_ALPHA
:
95 return PIPE_BLENDFACTOR_INV_DST_COLOR
;
103 blend_for_op(struct xa_composite_blend
*blend
,
104 enum xa_composite_op op
,
105 struct xa_picture
*src_pic
,
106 struct xa_picture
*mask_pic
,
107 struct xa_picture
*dst_pic
)
109 const int num_blends
=
110 sizeof(xa_blends
)/sizeof(struct xa_composite_blend
);
112 boolean supported
= FALSE
;
115 * our default in case something goes wrong
117 *blend
= xa_blends
[XA_BLEND_OP_OVER
];
119 for (i
= 0; i
< num_blends
; ++i
) {
120 if (xa_blends
[i
].op
== op
) {
121 *blend
= xa_blends
[i
];
128 * No component alpha yet.
130 if (mask_pic
&& mask_pic
->component_alpha
&& blend
->alpha_src
)
136 if ((dst_pic
->srf
->tex
->format
== PIPE_FORMAT_L8_UNORM
||
137 dst_pic
->srf
->tex
->format
== PIPE_FORMAT_R8_UNORM
)) {
138 blend
->rgb_src
= xa_convert_blend_for_luminance(blend
->rgb_src
);
139 blend
->rgb_dst
= xa_convert_blend_for_luminance(blend
->rgb_dst
);
143 * If there's no dst alpha channel, adjust the blend op so that we'll treat
147 if (xa_format_a(dst_pic
->pict_format
) == 0 && blend
->alpha_dst
) {
148 if (blend
->rgb_src
== PIPE_BLENDFACTOR_DST_ALPHA
)
149 blend
->rgb_src
= PIPE_BLENDFACTOR_ONE
;
150 else if (blend
->rgb_src
== PIPE_BLENDFACTOR_INV_DST_ALPHA
)
151 blend
->rgb_src
= PIPE_BLENDFACTOR_ZERO
;
159 xa_repeat_to_gallium(int mode
)
162 case xa_wrap_clamp_to_border
:
163 return PIPE_TEX_WRAP_CLAMP_TO_BORDER
;
165 return PIPE_TEX_WRAP_REPEAT
;
166 case xa_wrap_mirror_repeat
:
167 return PIPE_TEX_WRAP_MIRROR_REPEAT
;
168 case xa_wrap_clamp_to_edge
:
169 return PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
173 return PIPE_TEX_WRAP_REPEAT
;
176 static inline boolean
177 xa_filter_to_gallium(int xrender_filter
, int *out_filter
)
180 switch (xrender_filter
) {
181 case xa_filter_nearest
:
182 *out_filter
= PIPE_TEX_FILTER_NEAREST
;
184 case xa_filter_linear
:
185 *out_filter
= PIPE_TEX_FILTER_LINEAR
;
188 *out_filter
= PIPE_TEX_FILTER_NEAREST
;
195 xa_is_filter_accelerated(struct xa_picture
*pic
)
198 if (pic
&& !xa_filter_to_gallium(pic
->filter
, &filter
))
204 * xa_src_pict_is_accelerated - Check whether we support acceleration
205 * of the given src_pict type
207 * \param src_pic[in]: Pointer to a union xa_source_pict to check.
209 * \returns TRUE if accelerated, FALSE otherwise.
212 xa_src_pict_is_accelerated(const union xa_source_pict
*src_pic
)
217 if (src_pic
->type
== xa_src_pict_solid_fill
||
218 src_pic
->type
== xa_src_pict_float_solid_fill
)
225 xa_composite_check_accelerated(const struct xa_composite
*comp
)
227 struct xa_picture
*src_pic
= comp
->src
;
228 struct xa_picture
*mask_pic
= comp
->mask
;
229 struct xa_composite_blend blend
;
231 if (!xa_is_filter_accelerated(src_pic
) ||
232 !xa_is_filter_accelerated(comp
->mask
)) {
233 return -XA_ERR_INVAL
;
236 if (!xa_src_pict_is_accelerated(src_pic
->src_pict
) ||
237 (mask_pic
&& !xa_src_pict_is_accelerated(mask_pic
->src_pict
)))
238 return -XA_ERR_INVAL
;
240 if (!blend_for_op(&blend
, comp
->op
, comp
->src
, comp
->mask
, comp
->dst
))
241 return -XA_ERR_INVAL
;
244 * No component alpha yet.
246 if (mask_pic
&& mask_pic
->component_alpha
&& blend
.alpha_src
)
247 return -XA_ERR_INVAL
;
253 bind_composite_blend_state(struct xa_context
*ctx
,
254 const struct xa_composite
*comp
)
256 struct xa_composite_blend blend_opt
;
257 struct pipe_blend_state blend
;
259 if (!blend_for_op(&blend_opt
, comp
->op
, comp
->src
, comp
->mask
, comp
->dst
))
260 return -XA_ERR_INVAL
;
262 memset(&blend
, 0, sizeof(struct pipe_blend_state
));
263 blend
.rt
[0].blend_enable
= 1;
264 blend
.rt
[0].colormask
= PIPE_MASK_RGBA
;
266 blend
.rt
[0].rgb_src_factor
= blend_opt
.rgb_src
;
267 blend
.rt
[0].alpha_src_factor
= blend_opt
.rgb_src
;
268 blend
.rt
[0].rgb_dst_factor
= blend_opt
.rgb_dst
;
269 blend
.rt
[0].alpha_dst_factor
= blend_opt
.rgb_dst
;
271 cso_set_blend(ctx
->cso
, &blend
);
276 picture_format_fixups(struct xa_picture
*src_pic
,
279 boolean set_alpha
= FALSE
;
280 boolean swizzle
= FALSE
;
282 struct xa_surface
*src
= src_pic
->srf
;
283 enum xa_formats src_hw_format
, src_pic_format
;
284 enum xa_surface_type src_hw_type
, src_pic_type
;
289 src_hw_format
= xa_surface_format(src
);
290 src_pic_format
= src_pic
->pict_format
;
292 set_alpha
= (xa_format_type_is_color(src_hw_format
) &&
293 xa_format_a(src_pic_format
) == 0);
296 ret
|= mask
? FS_MASK_SET_ALPHA
: FS_SRC_SET_ALPHA
;
298 if (src_hw_format
== src_pic_format
) {
299 if (src
->tex
->format
== PIPE_FORMAT_L8_UNORM
||
300 src
->tex
->format
== PIPE_FORMAT_R8_UNORM
)
301 return ((mask
) ? FS_MASK_LUMINANCE
: FS_SRC_LUMINANCE
);
306 src_hw_type
= xa_format_type(src_hw_format
);
307 src_pic_type
= xa_format_type(src_pic_format
);
309 swizzle
= ((src_hw_type
== xa_type_argb
&&
310 src_pic_type
== xa_type_abgr
) ||
311 ((src_hw_type
== xa_type_abgr
&&
312 src_pic_type
== xa_type_argb
)));
314 if (!swizzle
&& (src_hw_type
!= src_pic_type
))
318 ret
|= mask
? FS_MASK_SWIZZLE_RGB
: FS_SRC_SWIZZLE_RGB
;
/* Multiply a solid source color by a solid mask's alpha, in place.
 * NOTE(review): body reconstructed -- original lines missing from view. */
static void
xa_src_in_mask(float src[4], const float mask[4])
{
    int i;

    for (i = 0; i < 4; ++i)
        src[i] *= mask[3];
}
333 * xa_handle_src_pict - Set up xa_context state and fragment shader
334 * input based on scr_pict type
336 * \param ctx[in, out]: Pointer to the xa context.
337 * \param src_pict[in]: Pointer to the union xa_source_pict to consider.
338 * \param is_mask[in]: Whether we're considering a mask picture.
340 * \returns TRUE if succesful, FALSE otherwise.
342 * This function computes some xa_context state used to determine whether
343 * to upload the solid color and also the solid color itself used as an input
344 * to the fragment shader.
347 xa_handle_src_pict(struct xa_context
*ctx
,
348 const union xa_source_pict
*src_pict
,
351 float solid_color
[4];
353 switch(src_pict
->type
) {
354 case xa_src_pict_solid_fill
:
355 xa_pixel_to_float4(src_pict
->solid_fill
.color
, solid_color
);
357 case xa_src_pict_float_solid_fill
:
358 memcpy(solid_color
, src_pict
->float_solid_fill
.color
,
359 sizeof(solid_color
));
365 if (is_mask
&& ctx
->has_solid_src
)
366 xa_src_in_mask(ctx
->solid_color
, solid_color
);
368 memcpy(ctx
->solid_color
, solid_color
, sizeof(solid_color
));
371 ctx
->has_solid_mask
= TRUE
;
373 ctx
->has_solid_src
= TRUE
;
379 bind_shaders(struct xa_context
*ctx
, const struct xa_composite
*comp
)
381 unsigned vs_traits
= 0, fs_traits
= 0;
382 struct xa_shader shader
;
383 struct xa_picture
*src_pic
= comp
->src
;
384 struct xa_picture
*mask_pic
= comp
->mask
;
385 struct xa_picture
*dst_pic
= comp
->dst
;
387 ctx
->has_solid_src
= FALSE
;
388 ctx
->has_solid_mask
= FALSE
;
390 if (dst_pic
&& xa_format_type(dst_pic
->pict_format
) !=
391 xa_format_type(xa_surface_format(dst_pic
->srf
)))
392 return -XA_ERR_INVAL
;
395 if (src_pic
->wrap
== xa_wrap_clamp_to_border
&& src_pic
->has_transform
)
396 fs_traits
|= FS_SRC_REPEAT_NONE
;
398 fs_traits
|= FS_COMPOSITE
;
399 vs_traits
|= VS_COMPOSITE
;
401 if (src_pic
->src_pict
) {
402 if (!xa_handle_src_pict(ctx
, src_pic
->src_pict
, false))
403 return -XA_ERR_INVAL
;
404 fs_traits
|= FS_SRC_SRC
;
405 vs_traits
|= VS_SRC_SRC
;
407 fs_traits
|= picture_format_fixups(src_pic
, 0);
411 vs_traits
|= VS_MASK
;
412 fs_traits
|= FS_MASK
;
413 if (mask_pic
->component_alpha
)
415 if (mask_pic
->src_pict
) {
416 if (!xa_handle_src_pict(ctx
, mask_pic
->src_pict
, true))
417 return -XA_ERR_INVAL
;
419 if (ctx
->has_solid_src
) {
420 vs_traits
&= ~VS_MASK
;
421 fs_traits
&= ~FS_MASK
;
423 vs_traits
|= VS_MASK_SRC
;
424 fs_traits
|= FS_MASK_SRC
;
427 if (mask_pic
->wrap
== xa_wrap_clamp_to_border
&&
428 mask_pic
->has_transform
)
429 fs_traits
|= FS_MASK_REPEAT_NONE
;
431 fs_traits
|= picture_format_fixups(mask_pic
, 1);
435 if (ctx
->srf
->format
== PIPE_FORMAT_L8_UNORM
||
436 ctx
->srf
->format
== PIPE_FORMAT_R8_UNORM
)
437 fs_traits
|= FS_DST_LUMINANCE
;
439 shader
= xa_shaders_get(ctx
->shaders
, vs_traits
, fs_traits
);
440 cso_set_vertex_shader_handle(ctx
->cso
, shader
.vs
);
441 cso_set_fragment_shader_handle(ctx
->cso
, shader
.fs
);
446 bind_samplers(struct xa_context
*ctx
,
447 const struct xa_composite
*comp
)
449 struct pipe_sampler_state
*samplers
[PIPE_MAX_SAMPLERS
];
450 struct pipe_sampler_state src_sampler
, mask_sampler
;
451 struct pipe_sampler_view view_templ
;
452 struct pipe_sampler_view
*src_view
;
453 struct pipe_context
*pipe
= ctx
->pipe
;
454 struct xa_picture
*src_pic
= comp
->src
;
455 struct xa_picture
*mask_pic
= comp
->mask
;
456 int num_samplers
= 0;
458 xa_ctx_sampler_views_destroy(ctx
);
459 memset(&src_sampler
, 0, sizeof(struct pipe_sampler_state
));
460 memset(&mask_sampler
, 0, sizeof(struct pipe_sampler_state
));
462 if (src_pic
&& !ctx
->has_solid_src
) {
463 unsigned src_wrap
= xa_repeat_to_gallium(src_pic
->wrap
);
466 (void) xa_filter_to_gallium(src_pic
->filter
, &filter
);
468 src_sampler
.wrap_s
= src_wrap
;
469 src_sampler
.wrap_t
= src_wrap
;
470 src_sampler
.min_img_filter
= filter
;
471 src_sampler
.mag_img_filter
= filter
;
472 src_sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NEAREST
;
473 src_sampler
.normalized_coords
= 1;
474 samplers
[0] = &src_sampler
;
475 u_sampler_view_default_template(&view_templ
,
476 src_pic
->srf
->tex
,+ src_pic
->srf
->tex
->format
);
477 src_view
= pipe
->create_sampler_view(pipe
, src_pic
->srf
->tex
,
479 ctx
->bound_sampler_views
[0] = src_view
;
483 if (mask_pic
&& !ctx
->has_solid_mask
) {
484 unsigned mask_wrap
= xa_repeat_to_gallium(mask_pic
->wrap
);
487 (void) xa_filter_to_gallium(mask_pic
->filter
, &filter
);
489 mask_sampler
.wrap_s
= mask_wrap
;
490 mask_sampler
.wrap_t
= mask_wrap
;
491 mask_sampler
.min_img_filter
= filter
;
492 mask_sampler
.mag_img_filter
= filter
;
493 src_sampler
.min_mip_filter
= PIPE_TEX_MIPFILTER_NEAREST
;
494 mask_sampler
.normalized_coords
= 1;
495 samplers
[num_samplers
] = &mask_sampler
;
496 u_sampler_view_default_template(&view_templ
,
498 mask_pic
->srf
->tex
->format
);
499 src_view
= pipe
->create_sampler_view(pipe
, mask_pic
->srf
->tex
,
501 ctx
->bound_sampler_views
[num_samplers
] = src_view
;
505 cso_set_samplers(ctx
->cso
, PIPE_SHADER_FRAGMENT
, num_samplers
,
506 (const struct pipe_sampler_state
**)samplers
);
507 cso_set_sampler_views(ctx
->cso
, PIPE_SHADER_FRAGMENT
, num_samplers
,
508 ctx
->bound_sampler_views
);
509 ctx
->num_bound_samplers
= num_samplers
;
513 xa_composite_prepare(struct xa_context
*ctx
,
514 const struct xa_composite
*comp
)
516 struct xa_surface
*dst_srf
= comp
->dst
->srf
;
519 ret
= xa_ctx_srf_create(ctx
, dst_srf
);
520 if (ret
!= XA_ERR_NONE
)
524 renderer_bind_destination(ctx
, ctx
->srf
);
526 ret
= bind_composite_blend_state(ctx
, comp
);
527 if (ret
!= XA_ERR_NONE
)
529 ret
= bind_shaders(ctx
, comp
);
530 if (ret
!= XA_ERR_NONE
)
532 bind_samplers(ctx
, comp
);
534 if (ctx
->num_bound_samplers
== 0 ) { /* solid fill */
535 renderer_begin_solid(ctx
);
537 renderer_begin_textures(ctx
);
541 xa_ctx_srf_destroy(ctx
);
546 xa_composite_rect(struct xa_context
*ctx
,
547 int srcX
, int srcY
, int maskX
, int maskY
,
548 int dstX
, int dstY
, int width
, int height
)
550 if (ctx
->num_bound_samplers
== 0 ) { /* solid fill */
551 xa_scissor_update(ctx
, dstX
, dstY
, dstX
+ width
, dstY
+ height
);
552 renderer_solid(ctx
, dstX
, dstY
, dstX
+ width
, dstY
+ height
);
554 const struct xa_composite
*comp
= ctx
->comp
;
555 int pos
[6] = {srcX
, srcY
, maskX
, maskY
, dstX
, dstY
};
556 const float *src_matrix
= NULL
;
557 const float *mask_matrix
= NULL
;
559 xa_scissor_update(ctx
, dstX
, dstY
, dstX
+ width
, dstY
+ height
);
561 if (comp
->src
->has_transform
)
562 src_matrix
= comp
->src
->transform
;
563 if (comp
->mask
&& comp
->mask
->has_transform
)
564 mask_matrix
= comp
->mask
->transform
;
566 renderer_texture(ctx
, pos
, width
, height
,
567 src_matrix
, mask_matrix
);
572 xa_composite_done(struct xa_context
*ctx
)
574 renderer_draw_flush(ctx
);
577 ctx
->has_solid_src
= FALSE
;
578 ctx
->has_solid_mask
= FALSE
;
579 xa_ctx_sampler_views_destroy(ctx
);
582 static const struct xa_composite_allocation a
= {
583 .xa_composite_size
= sizeof(struct xa_composite
),
584 .xa_picture_size
= sizeof(struct xa_picture
),
585 .xa_source_pict_size
= sizeof(union xa_source_pict
),
588 XA_EXPORT
const struct xa_composite_allocation
*
589 xa_composite_allocation(void)