/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file
 * Code generate the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 *   - early depth test
 *   - fragment shader
 *   - alpha test
 *   - depth/stencil test
 *   - blending
 *
 * This file has only the glue to assemble the fragment pipeline.  The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, and in a complete generic and reusable way.  Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different precisions and data types.  For example, the
 * fragment shader typically needs to be done in floats, but the
 * depth/stencil test and blending are better done in the type that most
 * closely matches the depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply a different number of elements, so we
 * must code generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
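
/*
 * Illustrative note (not from the original file): with a 128-bit SIMD
 * register, a 32-bit float type yields 4-element vectors while an 8-bit
 * unorm type yields 16-element vectors, so one iteration of an 8-bit
 * blend stage consumes the output of four iterations of the float
 * fragment shader stage.  This is the feed/consume ratio the comment
 * above refers to.
 */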

#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pointer.h"
#include "util/u_format.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "util/simple_list.h"
#include "util/u_dual_blend.h"
#include "os/os_time.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_conv.h"
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_pack.h"
#include "gallivm/lp_bld_format.h"
#include "gallivm/lp_bld_quad.h"

#include "lp_bld_alpha.h"
#include "lp_bld_blend.h"
#include "lp_bld_depth.h"
#include "lp_bld_interp.h"
#include "lp_context.h"
#include "lp_tex_sample.h"
#include "lp_flush.h"
#include "lp_state_fs.h"


/** Fragment shader number (for debugging) */
static unsigned fs_no = 0;


/**
 * Expand the relevant bits of mask_input to a n*4-dword mask for the
 * n*four pixels in n 2x2 quads.  This will set the n*four elements of the
 * quad mask vector to 0 or ~0.
 * Grouping is 01, 23 for 2 quad mode hence only 0 and 2 are valid
 * quad arguments with fs length 8.
 *
 * \param first_quad  which quad(s) of the quad group to test, in [0,3]
 * \param mask_input  bitwise mask for the whole 4x4 stamp
 */
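/*
 * Illustrative example (not in the original): with fs length 4 and
 * first_quad == 2, mask_input is shifted right by 8, so bits 8, 9, 12
 * and 13 of the original 4x4 stamp select the four pixels of that quad;
 * each set bit becomes a ~0 lane in the resulting quad mask vector.
 */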
static LLVMValueRef
generate_quad_mask(struct gallivm_state *gallivm,
                   struct lp_type fs_type,
                   unsigned first_quad,
                   LLVMValueRef mask_input) /* int32 */
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type mask_type;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
   LLVMValueRef bits[16];
   LLVMValueRef mask, bits_vec;
   int shift, i;

   /*
    * XXX: We'll need a different path for 16 x u8
    */
   assert(fs_type.width == 32);
   assert(fs_type.length <= ARRAY_SIZE(bits));
   mask_type = lp_int_type(fs_type);

   /*
    * mask_input >>= (quad * 4)
    */
   switch (first_quad) {
   case 0:
      shift = 0;
      break;
   case 1:
      assert(fs_type.length == 4);
      shift = 2;
      break;
   case 2:
      shift = 8;
      break;
   case 3:
      assert(fs_type.length == 4);
      shift = 10;
      break;
   default:
      assert(0);
      shift = 0;
   }

   mask_input = LLVMBuildLShr(builder,
                              mask_input,
                              LLVMConstInt(i32t, shift, 0),
                              "");

   /*
    * mask = { mask_input & (1 << i), for i in [0,3] }
    */
   mask = lp_build_broadcast(gallivm,
                             lp_build_vec_type(gallivm, mask_type),
                             mask_input);

   for (i = 0; i < fs_type.length / 4; i++) {
      unsigned j = 2 * (i % 2) + (i / 2) * 8;
      bits[4*i + 0] = LLVMConstInt(i32t, 1ULL << (j + 0), 0);
      bits[4*i + 1] = LLVMConstInt(i32t, 1ULL << (j + 1), 0);
      bits[4*i + 2] = LLVMConstInt(i32t, 1ULL << (j + 4), 0);
      bits[4*i + 3] = LLVMConstInt(i32t, 1ULL << (j + 5), 0);
   }
   bits_vec = LLVMConstVector(bits, fs_type.length);
   mask = LLVMBuildAnd(builder, mask, bits_vec, "");

   /*
    * mask = mask == bits ? ~0 : 0
    */
   mask = lp_build_compare(gallivm,
                           mask_type, PIPE_FUNC_EQUAL,
                           mask, bits_vec);

   return mask;
}


#define EARLY_DEPTH_TEST  0x1
#define LATE_DEPTH_TEST   0x2
#define EARLY_DEPTH_WRITE 0x4
#define LATE_DEPTH_WRITE  0x8
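
/*
 * Note (added for clarity): a variant combines at most one TEST flag with
 * at most one WRITE flag; e.g. the alpha-test/kill path below selects
 * EARLY_DEPTH_TEST | LATE_DEPTH_WRITE -- test early to eliminate quads,
 * but defer the write until the final coverage mask is known.
 */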

static int
find_output_by_semantic( const struct tgsi_shader_info *info,
                         unsigned semantic,
                         unsigned index )
{
   int i;

   for (i = 0; i < info->num_outputs; i++)
      if (info->output_semantic_name[i] == semantic &&
          info->output_semantic_index[i] == index)
         return i;

   return -1;
}


/**
 * Fetch the specified lp_jit_viewport structure for a given viewport_index.
 */
static LLVMValueRef
lp_llvm_viewport(LLVMValueRef context_ptr,
                 struct gallivm_state *gallivm,
                 LLVMValueRef viewport_index)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef ptr;
   LLVMValueRef res;
   struct lp_type viewport_type =
      lp_type_float_vec(32, 32 * LP_JIT_VIEWPORT_NUM_FIELDS);

   ptr = lp_jit_context_viewports(gallivm, context_ptr);
   ptr = LLVMBuildPointerCast(builder, ptr,
            LLVMPointerType(lp_build_vec_type(gallivm, viewport_type), 0), "");

   res = lp_build_pointer_get(builder, ptr, viewport_index);

   return res;
}


static LLVMValueRef
lp_build_depth_clamp(struct gallivm_state *gallivm,
                     LLVMBuilderRef builder,
                     struct lp_type type,
                     LLVMValueRef context_ptr,
                     LLVMValueRef thread_data_ptr,
                     LLVMValueRef z)
{
   LLVMValueRef viewport, min_depth, max_depth;
   LLVMValueRef viewport_index;
   struct lp_build_context f32_bld;

   assert(type.floating);
   lp_build_context_init(&f32_bld, gallivm, type);

   /*
    * Assumes clamping of the viewport index will occur in setup/gs. Value
    * is passed through the rasterization stage via lp_rast_shader_inputs.
    *
    * See: draw_clamp_viewport_idx and lp_clamp_viewport_idx for clamping
    * of the viewport index.
    */
   viewport_index = lp_jit_thread_data_raster_state_viewport_index(gallivm,
                       thread_data_ptr);

   /*
    * Load the min and max depth from the lp_jit_context.viewports
    * array of lp_jit_viewport structures.
    */
   viewport = lp_llvm_viewport(context_ptr, gallivm, viewport_index);

   /* viewports[viewport_index].min_depth */
   min_depth = LLVMBuildExtractElement(builder, viewport,
                  lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MIN_DEPTH), "");
   min_depth = lp_build_broadcast_scalar(&f32_bld, min_depth);

   /* viewports[viewport_index].max_depth */
   max_depth = LLVMBuildExtractElement(builder, viewport,
                  lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MAX_DEPTH), "");
   max_depth = lp_build_broadcast_scalar(&f32_bld, max_depth);

   /*
    * Clamp to the min and max depth values for the given viewport.
    */
   return lp_build_clamp(&f32_bld, z, min_depth, max_depth);
}


/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 */
static void
generate_fs_loop(struct gallivm_state *gallivm,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key,
                 LLVMBuilderRef builder,
                 struct lp_type type,
                 LLVMValueRef context_ptr,
                 LLVMValueRef num_loop,
                 struct lp_build_interp_soa_context *interp,
                 struct lp_build_sampler_soa *sampler,
                 LLVMValueRef mask_store,
                 LLVMValueRef (*out_color)[4],
                 LLVMValueRef depth_ptr,
                 LLVMValueRef depth_stride,
                 LLVMValueRef facing,
                 LLVMValueRef thread_data_ptr)
{
   const struct util_format_description *zs_format_desc = NULL;
   const struct tgsi_token *tokens = shader->base.tokens;
   struct lp_type int_type = lp_int_type(type);
   LLVMTypeRef vec_type, int_vec_type;
   LLVMValueRef mask_ptr, mask_val;
   LLVMValueRef consts_ptr, num_consts_ptr;
   LLVMValueRef z;
   LLVMValueRef z_value, s_value;
   LLVMValueRef z_fb, s_fb;
   LLVMValueRef stencil_refs[2];
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
   struct lp_build_for_loop_state loop_state;
   struct lp_build_mask_context mask;
   /*
    * TODO: figure out if simple_shader optimization is really worthwhile to
    * keep. Disabled because it may hide some real bugs in the (depth/stencil)
    * code since tests tend to take another codepath than real shaders.
    */
   boolean simple_shader = (shader->info.base.file_count[TGSI_FILE_SAMPLER] == 0 &&
                            shader->info.base.num_inputs < 3 &&
                            shader->info.base.num_instructions < 8) && 0;
   const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
                                     util_blend_state_is_dual(&key->blend, 0);
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;
   unsigned depth_mode;

   struct lp_bld_tgsi_system_values system_values;

   memset(&system_values, 0, sizeof(system_values));

   if (key->depth.enabled ||
       key->stencil[0].enabled) {

      zs_format_desc = util_format_description(key->zsbuf_format);
      assert(zs_format_desc);

      if (!shader->info.base.writes_z && !shader->info.base.writes_stencil) {
         if (key->alpha.enabled ||
             key->blend.alpha_to_coverage ||
             shader->info.base.uses_kill) {
            /* With alpha test and kill, can do the depth test early
             * and hopefully eliminate some quads.  But need to do a
             * special deferred depth write once the final mask value
             * is known. This only works though if there's either no
             * stencil test or the stencil value isn't written.
             */
            if (key->stencil[0].enabled && (key->stencil[0].writemask ||
                                            (key->stencil[1].enabled &&
                                             key->stencil[1].writemask)))
               depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
            else
               depth_mode = EARLY_DEPTH_TEST | LATE_DEPTH_WRITE;
         }
         else
            depth_mode = EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE;
      }
      else {
         depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
      }

      if (!(key->depth.enabled && key->depth.writemask) &&
          !(key->stencil[0].enabled && (key->stencil[0].writemask ||
                                        (key->stencil[1].enabled &&
                                         key->stencil[1].writemask))))
         depth_mode &= ~(LATE_DEPTH_WRITE | EARLY_DEPTH_WRITE);
   }
   else {
      depth_mode = 0;
   }

   vec_type = lp_build_vec_type(gallivm, type);
   int_vec_type = lp_build_vec_type(gallivm, int_type);

   stencil_refs[0] = lp_jit_context_stencil_ref_front_value(gallivm, context_ptr);
   stencil_refs[1] = lp_jit_context_stencil_ref_back_value(gallivm, context_ptr);
   /* convert scalar stencil refs into vectors */
   stencil_refs[0] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[0]);
   stencil_refs[1] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[1]);

   consts_ptr = lp_jit_context_constants(gallivm, context_ptr);
   num_consts_ptr = lp_jit_context_num_constants(gallivm, context_ptr);

   lp_build_for_loop_begin(&loop_state, gallivm,
                           lp_build_const_int32(gallivm, 0),
                           LLVMIntULT,
                           num_loop,
                           lp_build_const_int32(gallivm, 1));

   mask_ptr = LLVMBuildGEP(builder, mask_store,
                           &loop_state.counter, 1, "mask_ptr");
   mask_val = LLVMBuildLoad(builder, mask_ptr, "");

   memset(outputs, 0, sizeof outputs);

   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
         out_color[cbuf][chan] = lp_build_array_alloca(gallivm,
                                                       lp_build_vec_type(gallivm,
                                                                         type),
                                                       num_loop, "color");
      }
   }
   if (dual_source_blend) {
      assert(key->nr_cbufs <= 1);
      for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
         out_color[1][chan] = lp_build_array_alloca(gallivm,
                                                    lp_build_vec_type(gallivm,
                                                                      type),
                                                    num_loop, "color1");
      }
   }

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, gallivm, type, mask_val);

   if (!(depth_mode & EARLY_DEPTH_TEST) && !simple_shader)
      lp_build_mask_check(&mask);

   lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter);

   z = interp->pos[2];

   if (depth_mode & EARLY_DEPTH_TEST) {
      /*
       * Clamp according to ARB_depth_clamp semantics.
       */
      if (key->depth_clamp) {
         z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
                                  thread_data_ptr, z);
      }
      lp_build_depth_stencil_load_swizzled(gallivm, type,
                                           zs_format_desc, key->resource_1d,
                                           depth_ptr, depth_stride,
                                           &z_fb, &s_fb, loop_state.counter);
      lp_build_depth_stencil_test(gallivm,
                                  &key->depth,
                                  key->stencil,
                                  type,
                                  zs_format_desc,
                                  &mask,
                                  stencil_refs,
                                  z, z_fb, s_fb,
                                  facing,
                                  &z_value, &s_value,
                                  !simple_shader);

      if (depth_mode & EARLY_DEPTH_WRITE) {
         lp_build_depth_stencil_write_swizzled(gallivm, type,
                                               zs_format_desc, key->resource_1d,
                                               NULL, NULL, NULL, loop_state.counter,
                                               depth_ptr, depth_stride,
                                               z_value, s_value);
      }
      /*
       * Note mask check if stencil is enabled must be after ds write not after
       * stencil test otherwise new stencil values may not get written if all
       * fragments got killed by depth/stencil test.
       */
      if (!simple_shader && key->stencil[0].enabled)
         lp_build_mask_check(&mask);
   }

   lp_build_interp_soa_update_inputs_dyn(interp, gallivm, loop_state.counter);

   /* Build the actual shader */
   lp_build_tgsi_soa(gallivm, tokens, type, &mask,
                     consts_ptr, num_consts_ptr, &system_values,
                     interp->inputs,
                     outputs, context_ptr, thread_data_ptr,
                     sampler, &shader->info.base, NULL);

   /* Alpha test */
   if (key->alpha.enabled) {
      int color0 = find_output_by_semantic(&shader->info.base,
                                           TGSI_SEMANTIC_COLOR,
                                           0);

      if (color0 != -1 && outputs[color0][3]) {
         const struct util_format_description *cbuf_format_desc;
         LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");
         LLVMValueRef alpha_ref_value;

         alpha_ref_value = lp_jit_context_alpha_ref_value(gallivm, context_ptr);
         alpha_ref_value = lp_build_broadcast(gallivm, vec_type, alpha_ref_value);

         cbuf_format_desc = util_format_description(key->cbuf_format[0]);

         lp_build_alpha_test(gallivm, key->alpha.func, type, cbuf_format_desc,
                             &mask, alpha, alpha_ref_value,
                             (depth_mode & LATE_DEPTH_TEST) != 0);
      }
   }

   /* Emulate Alpha to Coverage with Alpha test */
   if (key->blend.alpha_to_coverage) {
      int color0 = find_output_by_semantic(&shader->info.base,
                                           TGSI_SEMANTIC_COLOR,
                                           0);

      if (color0 != -1 && outputs[color0][3]) {
         LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");

         lp_build_alpha_to_coverage(gallivm, type,
                                    &mask, alpha,
                                    (depth_mode & LATE_DEPTH_TEST) != 0);
      }
   }

   /* Late Z test */
   if (depth_mode & LATE_DEPTH_TEST) {
      int pos0 = find_output_by_semantic(&shader->info.base,
                                         TGSI_SEMANTIC_POSITION,
                                         0);
      int s_out = find_output_by_semantic(&shader->info.base,
                                          TGSI_SEMANTIC_STENCIL,
                                          0);
      if (pos0 != -1 && outputs[pos0][2]) {
         z = LLVMBuildLoad(builder, outputs[pos0][2], "output.z");

         /*
          * Clamp according to ARB_depth_clamp semantics.
          */
         if (key->depth_clamp) {
            z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
                                     thread_data_ptr, z);
         }
      }

      if (s_out != -1 && outputs[s_out][1]) {
         /* there's only one value, and spec says to discard additional bits */
         LLVMValueRef s_max_mask = lp_build_const_int_vec(gallivm, int_type, 255);
         stencil_refs[0] = LLVMBuildLoad(builder, outputs[s_out][1], "output.s");
         stencil_refs[0] = LLVMBuildBitCast(builder, stencil_refs[0], int_vec_type, "");
         stencil_refs[0] = LLVMBuildAnd(builder, stencil_refs[0], s_max_mask, "");
         stencil_refs[1] = stencil_refs[0];
      }

      lp_build_depth_stencil_load_swizzled(gallivm, type,
                                           zs_format_desc, key->resource_1d,
                                           depth_ptr, depth_stride,
                                           &z_fb, &s_fb, loop_state.counter);

      lp_build_depth_stencil_test(gallivm,
                                  &key->depth,
                                  key->stencil,
                                  type,
                                  zs_format_desc,
                                  &mask,
                                  stencil_refs,
                                  z, z_fb, s_fb,
                                  facing,
                                  &z_value, &s_value,
                                  !simple_shader);
      /* Late Z write */
      if (depth_mode & LATE_DEPTH_WRITE) {
         lp_build_depth_stencil_write_swizzled(gallivm, type,
                                               zs_format_desc, key->resource_1d,
                                               NULL, NULL, NULL, loop_state.counter,
                                               depth_ptr, depth_stride,
                                               z_value, s_value);
      }
   }
   else if ((depth_mode & EARLY_DEPTH_TEST) &&
            (depth_mode & LATE_DEPTH_WRITE))
   {
      /* Need to apply a reduced mask to the depth write.  Reload the
       * depth value, update from zs_value with the new mask value and
       * write that out.
       */
      lp_build_depth_stencil_write_swizzled(gallivm, type,
                                            zs_format_desc, key->resource_1d,
                                            &mask, z_fb, s_fb, loop_state.counter,
                                            depth_ptr, depth_stride,
                                            z_value, s_value);
   }

   /* Color write  */
   for (attrib = 0; attrib < shader->info.base.num_outputs; ++attrib)
   {
      unsigned cbuf = shader->info.base.output_semantic_index[attrib];
      if ((shader->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_COLOR) &&
          ((cbuf < key->nr_cbufs) || (cbuf == 1 && dual_source_blend)))
      {
         for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
            if(outputs[attrib][chan]) {
               /* XXX: just initialize outputs to point at colors[] and
                * skip this.
                */
               LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
               LLVMValueRef color_ptr;
               color_ptr = LLVMBuildGEP(builder, out_color[cbuf][chan],
                                        &loop_state.counter, 1, "");
               lp_build_name(out, "color%u.%c", attrib, "rgba"[chan]);
               LLVMBuildStore(builder, out, color_ptr);
            }
         }
      }
   }

   if (key->occlusion_count) {
      LLVMValueRef counter = lp_jit_thread_data_counter(gallivm, thread_data_ptr);
      lp_build_name(counter, "counter");
      lp_build_occlusion_count(gallivm, type,
                               lp_build_mask_value(&mask), counter);
   }

   mask_val = lp_build_mask_end(&mask);
   LLVMBuildStore(builder, mask_val, mask_ptr);
   lp_build_for_loop_end(&loop_state);
}


/**
 * This function will reorder pixels from the fragment shader SoA to memory layout AoS
 *
 * Fragment Shader outputs pixels in small 2x2 blocks
 *  e.g. (0, 0), (1, 0), (0, 1), (1, 1) ; (2, 0) ...
 *
 * However in memory pixels are stored in rows
 *  e.g. (0, 0), (1, 0), (2, 0), (3, 0) ; (0, 1) ...
 *
 * @param type            fragment shader type (4x or 8x float)
 * @param num_fs          number of fs_src
 * @param is_1d           whether we're outputting to a 1d resource
 * @param dst_channels    number of output channels
 * @param fs_src          output from fragment shader
 * @param dst             pointer to store result
 * @param pad_inline      is channel padding inline or at end of row
 * @return                the number of dsts
 */
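/*
 * Illustrative sketch (not part of the original): the SoA -> AoS transpose
 * below turns four channel vectors R0R1R2R3, G0G1G2G3, B0B1B2B3, A0A1A2A3
 * into four pixel vectors R0G0B0A0, R1G1B1A1, R2G2B2A2, R3G3B3A3; the
 * twiddle/reorder steps then put those pixels into row-major memory order.
 */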
static unsigned
generate_fs_twiddle(struct gallivm_state *gallivm,
                    struct lp_type type,
                    unsigned num_fs,
                    unsigned dst_channels,
                    LLVMValueRef fs_src[][4],
                    LLVMValueRef* dst,
                    bool pad_inline)
{
   LLVMValueRef src[16];

   bool swizzle_pad;
   bool twiddle;
   bool split;

   unsigned pixels = type.length / 4;
   unsigned reorder_group;
   unsigned src_channels;
   unsigned src_count;
   unsigned i;

   src_channels = dst_channels < 3 ? dst_channels : 4;
   src_count = num_fs * src_channels;

   assert(pixels == 2 || pixels == 1);
   assert(num_fs * src_channels <= ARRAY_SIZE(src));

   /*
    * Transpose from SoA -> AoS
    */
   for (i = 0; i < num_fs; ++i) {
      lp_build_transpose_aos_n(gallivm, type, &fs_src[i][0], src_channels, &src[i * src_channels]);
   }

   /*
    * Pick transformation options
    */
   swizzle_pad = false;
   twiddle = false;
   split = false;
   reorder_group = 0;

   if (dst_channels == 1) {
      twiddle = true;

      if (pixels == 2) {
         split = true;
      }
   } else if (dst_channels == 2) {
      if (pixels == 1) {
         reorder_group = 1;
      }
   } else if (dst_channels > 2) {
      if (pixels == 1) {
         reorder_group = 2;
      } else {
         twiddle = true;
      }

      if (!pad_inline && dst_channels == 3 && pixels > 1) {
         swizzle_pad = true;
      }
   }

   /*
    * Split the src in half
    */
   if (split) {
      for (i = num_fs; i > 0; --i) {
         src[(i - 1)*2 + 1] = lp_build_extract_range(gallivm, src[i - 1], 4, 4);
         src[(i - 1)*2 + 0] = lp_build_extract_range(gallivm, src[i - 1], 0, 4);
      }

      src_count *= 2;
      type.length = 4;
   }
722 /* Twiddle pixels by reordering the array, e.g.:
724 * src_count = 8 -> 0 2 1 3 4 6 5 7
725 * src_count = 16 -> 0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
727 const unsigned reorder_sw
[] = { 0, 2, 1, 3 };
729 for (i
= 0; i
< src_count
; ++i
) {
730 unsigned group
= i
/ reorder_group
;
731 unsigned block
= (group
/ 4) * 4 * reorder_group
;
732 unsigned j
= block
+ (reorder_sw
[group
% 4] * reorder_group
) + (i
% reorder_group
);
735 } else if (twiddle
) {
736 /* Twiddle pixels across elements of array */
737 lp_bld_quad_twiddle(gallivm
, type
, src
, src_count
, dst
);
740 memcpy(dst
, src
, sizeof(LLVMValueRef
) * src_count
);
744 * Moves any padding between pixels to the end
745 * e.g. RGBXRGBX -> RGBRGBXX
748 unsigned char swizzles
[16];
749 unsigned elems
= pixels
* dst_channels
;
751 for (i
= 0; i
< type
.length
; ++i
) {
753 swizzles
[i
] = i
% dst_channels
+ (i
/ dst_channels
) * 4;
755 swizzles
[i
] = LP_BLD_SWIZZLE_DONTCARE
;
758 for (i
= 0; i
< src_count
; ++i
) {
759 dst
[i
] = lp_build_swizzle_aos_n(gallivm
, dst
[i
], swizzles
, type
.length
, type
.length
);


/**
 * Load an unswizzled block of pixels from memory
 */
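/*
 * Addressing sketch (added for illustration): vector i of the block maps to
 * row y = i / row_size and column x = i % row_size, and is loaded from
 * base_ptr + y * stride + x * (vector size in bytes), as computed below.
 */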
static void
load_unswizzled_block(struct gallivm_state *gallivm,
                      LLVMValueRef base_ptr,
                      LLVMValueRef stride,
                      unsigned block_width,
                      unsigned block_height,
                      LLVMValueRef* dst,
                      struct lp_type dst_type,
                      unsigned dst_count,
                      unsigned dst_alignment)
{
   LLVMBuilderRef builder = gallivm->builder;
   unsigned row_size = dst_count / block_height;
   unsigned i;

   /* Ensure block exactly fits into dst */
   assert((block_width * block_height) % dst_count == 0);

   for (i = 0; i < dst_count; ++i) {
      unsigned x = i % row_size;
      unsigned y = i / row_size;

      LLVMValueRef bx = lp_build_const_int32(gallivm, x * (dst_type.width / 8) * dst_type.length);
      LLVMValueRef by = LLVMBuildMul(builder, lp_build_const_int32(gallivm, y), stride, "");

      LLVMValueRef gep[2];
      LLVMValueRef dst_ptr;

      gep[0] = lp_build_const_int32(gallivm, 0);
      gep[1] = LLVMBuildAdd(builder, bx, by, "");

      dst_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
      dst_ptr = LLVMBuildBitCast(builder, dst_ptr, LLVMPointerType(lp_build_vec_type(gallivm, dst_type), 0), "");

      dst[i] = LLVMBuildLoad(builder, dst_ptr, "");

      LLVMSetAlignment(dst[i], dst_alignment);
   }
}


/**
 * Store an unswizzled block of pixels to memory
 */
static void
store_unswizzled_block(struct gallivm_state *gallivm,
                       LLVMValueRef base_ptr,
                       LLVMValueRef stride,
                       unsigned block_width,
                       unsigned block_height,
                       LLVMValueRef* src,
                       struct lp_type src_type,
                       unsigned src_count,
                       unsigned src_alignment)
{
   LLVMBuilderRef builder = gallivm->builder;
   unsigned row_size = src_count / block_height;
   unsigned i;

   /* Ensure src exactly fits into block */
   assert((block_width * block_height) % src_count == 0);

   for (i = 0; i < src_count; ++i) {
      unsigned x = i % row_size;
      unsigned y = i / row_size;

      LLVMValueRef bx = lp_build_const_int32(gallivm, x * (src_type.width / 8) * src_type.length);
      LLVMValueRef by = LLVMBuildMul(builder, lp_build_const_int32(gallivm, y), stride, "");

      LLVMValueRef gep[2];
      LLVMValueRef src_ptr;

      gep[0] = lp_build_const_int32(gallivm, 0);
      gep[1] = LLVMBuildAdd(builder, bx, by, "");

      src_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
      src_ptr = LLVMBuildBitCast(builder, src_ptr, LLVMPointerType(lp_build_vec_type(gallivm, src_type), 0), "");

      src_ptr = LLVMBuildStore(builder, src[i], src_ptr);

      LLVMSetAlignment(src_ptr, src_alignment);
   }
}


/**
 * Checks if a format description is an arithmetic format
 *
 * A format which has irregular channel sizes such as R3_G3_B2 or R5_G6_B5.
 */
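/*
 * Example (added for illustration): R5_G6_B5 is arithmetic because its
 * channel sizes differ and are not multiples of 8, while R8G8B8A8 is not,
 * since every channel is a uniform 8 bits.
 */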
static inline boolean
is_arithmetic_format(const struct util_format_description *format_desc)
{
   boolean arith = false;
   unsigned i;

   for (i = 0; i < format_desc->nr_channels; ++i) {
      arith |= format_desc->channel[i].size != format_desc->channel[0].size;
      arith |= (format_desc->channel[i].size % 8) != 0;
   }

   return arith;
}


/**
 * Checks if this format requires special handling due to required expansion
 * to floats for blending, and furthermore has "natural" packed AoS -> unpacked
 * SoA conversion.
 */
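/*
 * Example (added for illustration): PIPE_FORMAT_R11G11B10_FLOAT and sRGB
 * colorspace formats are the two cases below; both get unpacked to 32-bit
 * float SoA vectors for blending instead of being blended in memory layout.
 */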
static inline boolean
format_expands_to_float_soa(const struct util_format_description *format_desc)
{
   if (format_desc->format == PIPE_FORMAT_R11G11B10_FLOAT ||
       format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
      return true;
   }
   return false;
}


/**
 * Retrieves the type representing the memory layout for a format
 *
 * e.g. RGBA16F = 4x half-float and R3G3B2 = 1x byte
 */
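/*
 * Worked example (added for illustration): R5_G6_B5 is arithmetic, so the
 * code below sums the channel sizes (5 + 6 + 5) and returns a single
 * 16-bit lane (width 16, length 1) rather than one lane per channel.
 */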
static inline void
lp_mem_type_from_format_desc(const struct util_format_description *format_desc,
                             struct lp_type* type)
{
   unsigned i;
   unsigned chan;

   if (format_expands_to_float_soa(format_desc)) {
      /* just make this a uint with width of block */
      type->floating = false;
      type->fixed = false;
      type->sign = false;
      type->norm = false;
      type->width = format_desc->block.bits;
      type->length = 1;
      return;
   }

   for (i = 0; i < 4; i++)
      if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
         break;
   chan = i;

   memset(type, 0, sizeof(struct lp_type));
   type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
   type->fixed = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
   type->sign = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
   type->norm = format_desc->channel[chan].normalized;

   if (is_arithmetic_format(format_desc)) {
      type->width = 0;
      type->length = 1;

      for (i = 0; i < format_desc->nr_channels; ++i) {
         type->width += format_desc->channel[i].size;
      }
   } else {
      type->width = format_desc->channel[chan].size;
      type->length = format_desc->nr_channels;
   }
}


/**
 * Retrieves the type for a format which is usable in the blending code.
 *
 * e.g. RGBA16F = 4x float, R3G3B2 = 3x byte
 */
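/*
 * Worked example (added for illustration): for R5_G6_B5 the widest channel
 * is 6 bits, which rounds up to an 8-bit integer lane below; the format is
 * arithmetic with 3 channels, so length is padded from 3 to 4, giving a
 * 4 x 8-bit blend type.
 */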
static inline void
lp_blend_type_from_format_desc(const struct util_format_description *format_desc,
                               struct lp_type* type)
{
   unsigned i;
   unsigned chan;

   if (format_expands_to_float_soa(format_desc)) {
      /* always use ordinary floats for blending */
      type->floating = true;
      type->fixed = false;
      type->sign = true;
      type->norm = false;
      type->width = 32;
      type->length = 4;
      return;
   }

   for (i = 0; i < 4; i++)
      if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
         break;
   chan = i;

   memset(type, 0, sizeof(struct lp_type));
   type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
   type->fixed = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
   type->sign = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
   type->norm = format_desc->channel[chan].normalized;
   type->width = format_desc->channel[chan].size;
   type->length = format_desc->nr_channels;

   for (i = 1; i < format_desc->nr_channels; ++i) {
      if (format_desc->channel[i].size > type->width)
         type->width = format_desc->channel[i].size;
   }

   if (type->floating) {
      type->width = 32;
   } else {
      if (type->width <= 8) {
         type->width = 8;
      } else if (type->width <= 16) {
         type->width = 16;
      } else {
         type->width = 32;
      }
   }

   if (is_arithmetic_format(format_desc) && type->length == 3) {
      type->length = 4;
   }
}


/**
 * Scale a normalized value from src_bits to dst_bits.
 *
 * The exact calculation is
 *
 *    dst = iround(src * dst_mask / src_mask)
 *
 * or with integer rounding
 *
 *    dst = src * (2*dst_mask + sign(src)*src_mask) / (2*src_mask)
 *
 * where
 *
 *    src_mask = (1 << src_bits) - 1
 *    dst_mask = (1 << dst_bits) - 1
 *
 * but we try to avoid division and multiplication through shifts.
 */
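/*
 * Worked example (added for illustration): scaling 5 bits up to 8 bits
 * takes the dst_bits > src_bits path below and computes
 * dst = (src << 3) | (src >> 2), replicating the top source bits into the
 * low end so that 0 maps to 0 and 31 maps to 255 exactly.
 */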
static inline LLVMValueRef
scale_bits(struct gallivm_state *gallivm,
           int src_bits,
           int dst_bits,
           LLVMValueRef src,
           struct lp_type src_type)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result = src;

   if (dst_bits < src_bits) {
      int delta_bits = src_bits - dst_bits;

      if (delta_bits <= dst_bits) {
         /*
          * Approximate the rescaling with a single shift.
          *
          * This gives the wrong rounding.
          */
         result = LLVMBuildLShr(builder,
                                src,
                                lp_build_const_int_vec(gallivm, src_type, delta_bits),
                                "");
      } else {
         /*
          * Try more accurate rescaling.
          */

         /*
          * Drop the least significant bits to make space for the multiplication.
          *
          * XXX: A better approach would be to use a wider integer type as intermediate.  But
          * this is enough to convert alpha from 16bits -> 2 when rendering to
          * PIPE_FORMAT_R10G10B10A2_UNORM.
          */
         result = LLVMBuildLShr(builder,
                                src,
                                lp_build_const_int_vec(gallivm, src_type, dst_bits),
                                "");

         result = LLVMBuildMul(builder,
                               result,
                               lp_build_const_int_vec(gallivm, src_type, (1LL << dst_bits) - 1),
                               "");

         /*
          * Add a rounding term before the division.
          *
          * TODO: Handle signed integers too.
          */
         if (!src_type.sign) {
            result = LLVMBuildAdd(builder,
                                  result,
                                  lp_build_const_int_vec(gallivm, src_type, (1LL << (delta_bits - 1))),
                                  "");
         }

         /*
          * Approximate the division by src_mask with a src_bits shift.
          *
          * Given the src has already been shifted by dst_bits, all we need
          * to do is to shift by the difference.
          */
         result = LLVMBuildLShr(builder,
                                result,
                                lp_build_const_int_vec(gallivm, src_type, delta_bits),
                                "");
      }
   } else if (dst_bits > src_bits) {
      int db = dst_bits - src_bits;

      /* Shift left by difference in bits */
      result = LLVMBuildShl(builder,
                            src,
                            lp_build_const_int_vec(gallivm, src_type, db),
                            "");

      if (db < src_bits) {
         /* Enough bits in src to fill the remainder */
         LLVMValueRef lower = LLVMBuildLShr(builder,
                                            src,
                                            lp_build_const_int_vec(gallivm, src_type, src_bits - db),
                                            "");

         result = LLVMBuildOr(builder, result, lower, "");
      } else if (db > src_bits) {
         /* Need to repeatedly copy src bits to fill remainder in dst */
         unsigned n;

         for (n = src_bits; n < dst_bits; n *= 2) {
            LLVMValueRef shuv = lp_build_const_int_vec(gallivm, src_type, n);

            result = LLVMBuildOr(builder,
                                 result,
                                 LLVMBuildLShr(builder, result, shuv, ""),
                                 "");
         }
      }
   }

   return result;
}


/**
 * If RT is a smallfloat (needing denorms) format
 */
static inline boolean
have_smallfloat_format(struct lp_type dst_type,
                       enum pipe_format format)
{
   return ((dst_type.floating && dst_type.width != 32) ||
          /* due to format handling hacks this format doesn't have floating set
           * here (and actually has width set to 32 too) so special case this. */
          (format == PIPE_FORMAT_R11G11B10_FLOAT));
}


/**
 * Convert from memory format to blending format
 *
 * e.g. GL_R3G3B2 is 1 byte in memory but 3 bytes for blending
 */
static void
convert_to_blend_type(struct gallivm_state *gallivm,
                      unsigned block_size,
                      const struct util_format_description *src_fmt,
                      struct lp_type src_type,
                      struct lp_type dst_type,
                      LLVMValueRef* src, // and dst
                      unsigned num_srcs)
{
   LLVMValueRef *dst = src;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type blend_type;
   struct lp_type mem_type;
   unsigned i, j, k;
   unsigned pixels = block_size / num_srcs;
   bool is_arith;

   /*
    * full custom path for packed floats and srgb formats - none of the later
    * functions would do anything useful, and given the lp_type representation they
    * can't be fixed. Should really have some SoA blend path for these kind of
    * formats rather than hacking them in here.
    */
   if (format_expands_to_float_soa(src_fmt)) {
      LLVMValueRef tmpsrc[4];
      /*
       * This is pretty suboptimal for this case blending in SoA would be much
       * better, since conversion gets us SoA values so need to convert back.
       */
      assert(src_type.width == 32 || src_type.width == 16);
      assert(dst_type.floating);
      assert(dst_type.width == 32);
      assert(dst_type.length % 4 == 0);
      assert(num_srcs % 4 == 0);

      if (src_type.width == 16) {
         /* expand 4x16bit values to 4x32bit */
         struct lp_type type32x4 = src_type;
         LLVMTypeRef ltype32x4;
         unsigned num_fetch = dst_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
         type32x4.width = 32;
         ltype32x4 = lp_build_vec_type(gallivm, type32x4);
         for (i = 0; i < num_fetch; i++) {
            src[i] = LLVMBuildZExt(builder, src[i], ltype32x4, "");
         }
         src_type.width = 32;
      }
      for (i = 0; i < 4; i++) {
         tmpsrc[i] = src[i];
      }
      for (i = 0; i < num_srcs / 4; i++) {
         LLVMValueRef tmpsoa[4];
         LLVMValueRef tmps = tmpsrc[i];
         if (dst_type.length == 8) {
            LLVMValueRef shuffles[8];
            unsigned j;
            /* fetch was 4 values but need 8-wide output values */
            tmps = lp_build_concat(gallivm, &tmpsrc[i * 2], src_type, 2);
            /*
             * for 8-wide aos transpose would give us wrong order not matching
             * incoming converted fs values and mask. ARGH.
             */
            for (j = 0; j < 4; j++) {
               shuffles[j] = lp_build_const_int32(gallivm, j * 2);
               shuffles[j + 4] = lp_build_const_int32(gallivm, j * 2 + 1);
            }
            tmps = LLVMBuildShuffleVector(builder, tmps, tmps,
                                          LLVMConstVector(shuffles, 8), "");
         }
         if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
            lp_build_r11g11b10_to_float(gallivm, tmps, tmpsoa);
         }
         else {
            lp_build_unpack_rgba_soa(gallivm, src_fmt, dst_type, tmps, tmpsoa);
         }
         lp_build_transpose_aos(gallivm, dst_type, tmpsoa, &src[i * 4]);
      }
      return;
   }

   lp_mem_type_from_format_desc(src_fmt, &mem_type);
   lp_blend_type_from_format_desc(src_fmt, &blend_type);

   /* Is the format arithmetic */
   is_arith = blend_type.length * blend_type.width != mem_type.width * mem_type.length;
   is_arith &= !(mem_type.width == 16 && mem_type.floating);

   /* Pad if necessary */
   if (!is_arith && src_type.length < dst_type.length) {
      for (i = 0; i < num_srcs; ++i) {
         dst[i] = lp_build_pad_vector(gallivm, src[i], dst_type.length);
      }

      src_type.length = dst_type.length;
   }

   /* Special case for half-floats */
   if (mem_type.width == 16 && mem_type.floating) {
      assert(blend_type.width == 32 && blend_type.floating);
      lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);
      is_arith = false;
   }

   if (!is_arith) {
      return;
   }

   src_type.width = blend_type.width * blend_type.length;
   blend_type.length *= pixels;
   src_type.length *= pixels / (src_type.length / mem_type.length);

   for (i = 0; i < num_srcs; ++i) {
      LLVMValueRef chans[4];
      LLVMValueRef res = NULL;

      dst[i] = LLVMBuildZExt(builder, src[i], lp_build_vec_type(gallivm, src_type), "");

      for (j = 0; j < src_fmt->nr_channels; ++j) {
         unsigned mask = 0;
         unsigned sa = src_fmt->channel[j].shift;
#ifdef PIPE_ARCH_LITTLE_ENDIAN
         unsigned from_lsb = j;
#else
         unsigned from_lsb = src_fmt->nr_channels - j - 1;
#endif

         for (k = 0; k < src_fmt->channel[j].size; ++k) {
            mask |= 1 << k;
         }

         /* Extract bits from source */
         chans[j] = LLVMBuildLShr(builder,
                                  dst[i],
                                  lp_build_const_int_vec(gallivm, src_type, sa),
                                  "");

         chans[j] = LLVMBuildAnd(builder,
                                 chans[j],
                                 lp_build_const_int_vec(gallivm, src_type, mask),
                                 "");

         /* Scale bits */
         if (src_type.norm) {
            chans[j] = scale_bits(gallivm, src_fmt->channel[j].size,
                                  blend_type.width, chans[j], src_type);
         }

         /* Insert bits into correct position */
         chans[j] = LLVMBuildShl(builder,
                                 chans[j],
                                 lp_build_const_int_vec(gallivm, src_type, from_lsb * blend_type.width),
                                 "");

         if (j == 0) {
            res = chans[j];
         } else {
            res = LLVMBuildOr(builder, res, chans[j], "");
         }
      }

      dst[i] = LLVMBuildBitCast(builder, res, lp_build_vec_type(gallivm, blend_type), "");
   }
}


/**
 * Convert from blending format to memory format
 *
 * e.g. GL_R3G3B2 is 3 bytes for blending but 1 byte in memory
 */
static void
convert_from_blend_type(struct gallivm_state *gallivm,
                        unsigned block_size,
                        const struct util_format_description *src_fmt,
                        struct lp_type src_type,
                        struct lp_type dst_type,
                        LLVMValueRef* src, // and dst
                        unsigned num_srcs)
{
   LLVMValueRef* dst = src;
   unsigned i, j, k;
   struct lp_type mem_type;
   struct lp_type blend_type;
   LLVMBuilderRef builder = gallivm->builder;
   unsigned pixels = block_size / num_srcs;
   bool is_arith;

   /*
    * full custom path for packed floats and srgb formats - none of the later
    * functions would do anything useful, and given the lp_type representation they
    * can't be fixed. Should really have some SoA blend path for these kind of
    * formats rather than hacking them in here.
    */
   if (format_expands_to_float_soa(src_fmt)) {
      /*
       * This is pretty suboptimal for this case blending in SoA would be much
       * better - we need to transpose the AoS values back to SoA values for
       * conversion/packing.
       */
      assert(src_type.floating);
      assert(src_type.width == 32);
      assert(src_type.length % 4 == 0);
      assert(dst_type.width == 32 || dst_type.width == 16);

      for (i = 0; i < num_srcs / 4; i++) {
         LLVMValueRef tmpsoa[4], tmpdst;
         lp_build_transpose_aos(gallivm, src_type, &src[i * 4], tmpsoa);
         /* really really need SoA here */

         if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
            tmpdst = lp_build_float_to_r11g11b10(gallivm, tmpsoa);
         }
         else {
            tmpdst = lp_build_float_to_srgb_packed(gallivm, src_fmt,
                                                   src_type, tmpsoa);
         }

         if (src_type.length == 8) {
            LLVMValueRef tmpaos, shuffles[8];
            unsigned j;
            /*
             * for 8-wide aos transpose has given us wrong order not matching
             * output order. HMPF. Also need to split the output values manually.
             */
            for (j = 0; j < 4; j++) {
               shuffles[j * 2] = lp_build_const_int32(gallivm, j);
               shuffles[j * 2 + 1] = lp_build_const_int32(gallivm, j + 4);
            }
            tmpaos = LLVMBuildShuffleVector(builder, tmpdst, tmpdst,
                                            LLVMConstVector(shuffles, 8), "");
            src[i * 2] = lp_build_extract_range(gallivm, tmpaos, 0, 4);
            src[i * 2 + 1] = lp_build_extract_range(gallivm, tmpaos, 4, 4);
         }
         else {
            src[i] = tmpdst;
         }
      }
      if (dst_type.width == 16) {
         struct lp_type type16x8 = dst_type;
         struct lp_type type32x4 = dst_type;
         LLVMTypeRef ltype16x4, ltypei64, ltypei128;
         unsigned num_fetch = src_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
         type16x8.length = 8;
         type32x4.width = 32;
         ltypei128 = LLVMIntTypeInContext(gallivm->context, 128);
         ltypei64 = LLVMIntTypeInContext(gallivm->context, 64);
         ltype16x4 = lp_build_vec_type(gallivm, dst_type);
         /* We could do vector truncation but it doesn't generate very good code */
         for (i = 0; i < num_fetch; i++) {
            src[i] = lp_build_pack2(gallivm, type32x4, type16x8,
                                    src[i], lp_build_zero(gallivm, type32x4));
            src[i] = LLVMBuildBitCast(builder, src[i], ltypei128, "");
            src[i] = LLVMBuildTrunc(builder, src[i], ltypei64, "");
            src[i] = LLVMBuildBitCast(builder, src[i], ltype16x4, "");
         }
      }
      return;
   }

   lp_mem_type_from_format_desc(src_fmt, &mem_type);
   lp_blend_type_from_format_desc(src_fmt, &blend_type);

   is_arith = (blend_type.length * blend_type.width != mem_type.width * mem_type.length);

   /* Special case for half-floats */
   if (mem_type.width == 16 && mem_type.floating) {
      int length = dst_type.length;
      assert(blend_type.width == 32 && blend_type.floating);

      dst_type.length = src_type.length;

      lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);

      dst_type.length = length;
      is_arith = false;
   }

   /* Remove any padding */
   if (!is_arith && (src_type.length % mem_type.length)) {
      src_type.length -= (src_type.length % mem_type.length);

      for (i = 0; i < num_srcs; ++i) {
         dst[i] = lp_build_extract_range(gallivm, dst[i], 0, src_type.length);
      }
   }

   /* No bit arithmetic to do */
   if (!is_arith) {
      return;
   }

   src_type.length = pixels;
   src_type.width = blend_type.length * blend_type.width;
   dst_type.length = pixels;

   for (i = 0; i < num_srcs; ++i) {
      LLVMValueRef chans[4];
      LLVMValueRef res = NULL;

      dst[i] = LLVMBuildBitCast(builder, src[i], lp_build_vec_type(gallivm, src_type), "");

      for (j = 0; j < src_fmt->nr_channels; ++j) {
         unsigned mask = 0;
         unsigned sa = src_fmt->channel[j].shift;
#ifdef PIPE_ARCH_LITTLE_ENDIAN
         unsigned from_lsb = j;
#else
         unsigned from_lsb = src_fmt->nr_channels - j - 1;
#endif

         assert(blend_type.width > src_fmt->channel[j].size);

         for (k = 0; k < blend_type.width; ++k) {
            mask |= 1 << k;
         }

         /* Extract bits */
         chans[j] = LLVMBuildLShr(builder,
                                  dst[i],
                                  lp_build_const_int_vec(gallivm, src_type, from_lsb * blend_type.width),
                                  "");

         chans[j] = LLVMBuildAnd(builder,
                                 chans[j],
                                 lp_build_const_int_vec(gallivm, src_type, mask),
                                 "");

         /* Scale down bits */
         if (src_type.norm) {
            chans[j] = scale_bits(gallivm, blend_type.width,
                                  src_fmt->channel[j].size, chans[j], src_type);
         }

         /* Insert bits */
         chans[j] = LLVMBuildShl(builder,
                                 chans[j],
                                 lp_build_const_int_vec(gallivm, src_type, sa),
                                 "");

         sa += src_fmt->channel[j].size;

         if (j == 0) {
            res = chans[j];
         } else {
            res = LLVMBuildOr(builder, res, chans[j], "");
         }
      }

      assert (dst_type.width != 24);

      dst[i] = LLVMBuildTrunc(builder, res, lp_build_vec_type(gallivm, dst_type), "");
   }
}


/**
 * Convert alpha to same blend type as src
 */
static void
convert_alpha(struct gallivm_state *gallivm,
              struct lp_type row_type,
              struct lp_type alpha_type,
              const unsigned block_size,
              const unsigned block_height,
              const unsigned src_count,
              const unsigned dst_channels,
              const bool pad_inline,
              LLVMValueRef* src_alpha)
{
   LLVMBuilderRef builder = gallivm->builder;
   unsigned i, j;
   unsigned length = row_type.length;
   row_type.length = alpha_type.length;

   /* Twiddle the alpha to match pixels */
   lp_bld_quad_twiddle(gallivm, alpha_type, src_alpha, block_height, src_alpha);

   /*
    * TODO this should use single lp_build_conv call for
    * src_count == 1 && dst_channels == 1 case (dropping the concat below)
    */
   for (i = 0; i < block_height; ++i) {
      lp_build_conv(gallivm, alpha_type, row_type, &src_alpha[i], 1, &src_alpha[i], 1);
   }

   alpha_type = row_type;
   row_type.length = length;

   /* If only one channel we only need the single alpha value per pixel */
   if (src_count == 1 && dst_channels == 1) {
      lp_build_concat_n(gallivm, alpha_type, src_alpha, block_height, src_alpha, src_count);
   } else {
      /* If there are more srcs than rows then we need to split alpha up */
      if (src_count > block_height) {
         for (i = src_count; i > 0; --i) {
            unsigned pixels = block_size / src_count;
            unsigned idx = i - 1;

            src_alpha[idx] = lp_build_extract_range(gallivm, src_alpha[(idx * pixels) / 4],
                                                    (idx * pixels) % 4, pixels);
         }
      }

      /* If there is a src for each pixel broadcast the alpha across whole row */
      if (src_count == block_size) {
         for (i = 0; i < src_count; ++i) {
            src_alpha[i] = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm, row_type), src_alpha[i]);
         }
      } else {
         unsigned pixels = block_size / src_count;
         unsigned channels = pad_inline ? TGSI_NUM_CHANNELS : dst_channels;
         unsigned alpha_span = 1;
         LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];

         /* Check if we need 2 src_alphas for our shuffles */
         if (pixels > alpha_type.length) {
            alpha_span = 2;
         }

         /* Broadcast alpha across all channels, e.g. a1a2 to a1a1a1a1a2a2a2a2 */
         for (j = 0; j < row_type.length; ++j) {
            if (j < pixels * channels) {
               shuffles[j] = lp_build_const_int32(gallivm, j / channels);
            } else {
               shuffles[j] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
            }
         }

         for (i = 0; i < src_count; ++i) {
            unsigned idx1 = i, idx2 = i;

            if (alpha_span > 1){
               idx1 *= alpha_span;
               idx2 = idx1 + 1;
            }

            src_alpha[i] = LLVMBuildShuffleVector(builder,
                                                  src_alpha[idx1],
                                                  src_alpha[idx2],
                                                  LLVMConstVector(shuffles, row_type.length),
                                                  "");
         }
      }
   }
}


/**
 * Generates the blend function for unswizzled colour buffers
 * Also generates the read & write from colour buffer
 */
static void
generate_unswizzled_blend(struct gallivm_state *gallivm,
                          unsigned rt,
                          struct lp_fragment_shader_variant *variant,
                          enum pipe_format out_format,
                          unsigned int num_fs,
                          struct lp_type fs_type,
                          LLVMValueRef* fs_mask,
                          LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][4],
                          LLVMValueRef context_ptr,
                          LLVMValueRef color_ptr,
                          LLVMValueRef stride,
                          unsigned partial_mask,
                          boolean do_branch)
{
   const unsigned alpha_channel = 3;
   const unsigned block_width = LP_RASTER_BLOCK_SIZE;
   const unsigned block_height = LP_RASTER_BLOCK_SIZE;
   const unsigned block_size = block_width * block_height;
   const unsigned lp_integer_vector_width = 128;

   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef fs_src[4][TGSI_NUM_CHANNELS];
   LLVMValueRef fs_src1[4][TGSI_NUM_CHANNELS];
   LLVMValueRef src_alpha[4 * 4];
   LLVMValueRef src1_alpha[4 * 4] = { NULL };
   LLVMValueRef src_mask[4 * 4];
   LLVMValueRef src[4 * 4];
   LLVMValueRef src1[4 * 4];
   LLVMValueRef dst[4 * 4];
   LLVMValueRef blend_color;
   LLVMValueRef blend_alpha;
   LLVMValueRef i32_zero;
   LLVMValueRef check_mask;
   LLVMValueRef undef_src_val;

   struct lp_build_mask_context mask_ctx;
   struct lp_type mask_type;
   struct lp_type blend_type;
   struct lp_type row_type;
   struct lp_type dst_type;

   unsigned char swizzle[TGSI_NUM_CHANNELS];
   unsigned vector_width;
   unsigned src_channels = TGSI_NUM_CHANNELS;
   unsigned dst_channels;
   unsigned dst_count;
   unsigned src_count;
   unsigned i, j;

   const struct util_format_description* out_format_desc = util_format_description(out_format);

   unsigned dst_alignment;

   bool pad_inline = is_arithmetic_format(out_format_desc);
   bool has_alpha = false;
   const boolean dual_source_blend = variant->key.blend.rt[0].blend_enable &&
                                     util_blend_state_is_dual(&variant->key.blend, 0);

   const boolean is_1d = variant->key.resource_1d;
   unsigned num_fullblock_fs = is_1d ? 2 * num_fs : num_fs;
   LLVMValueRef fpstate = 0;

   /* Get type from output format */
   lp_blend_type_from_format_desc(out_format_desc, &row_type);
   lp_mem_type_from_format_desc(out_format_desc, &dst_type);

   /*
    * Technically this code should go into lp_build_smallfloat_to_float
    * and lp_build_float_to_smallfloat but due to the
    * http://llvm.org/bugs/show_bug.cgi?id=6393
    * llvm reorders the mxcsr intrinsics in a way that breaks the code.
    * So the ordering is important here and there shouldn't be any
    * llvm ir instructions in this function before
    * this, otherwise half-float format conversions won't work
    * (again due to llvm bug #6393).
    */
   if (have_smallfloat_format(dst_type, out_format)) {
      /* We need to make sure that denorms are ok for half float
       * conversions */
      fpstate = lp_build_fpstate_get(gallivm);
      lp_build_fpstate_set_denorms_zero(gallivm, FALSE);
   }

   mask_type = lp_int32_vec4_type();
   mask_type.length = fs_type.length;

   for (i = num_fs; i < num_fullblock_fs; i++) {
      fs_mask[i] = lp_build_zero(gallivm, mask_type);
   }

   /* Do not bother executing code when mask is empty.. */
   if (do_branch) {
      check_mask = LLVMConstNull(lp_build_int_vec_type(gallivm, mask_type));

      for (i = 0; i < num_fullblock_fs; ++i) {
         check_mask = LLVMBuildOr(builder, check_mask, fs_mask[i], "");
      }

      lp_build_mask_begin(&mask_ctx, gallivm, mask_type, check_mask);
      lp_build_mask_check(&mask_ctx);
   }

   partial_mask |= !variant->opaque;
   i32_zero = lp_build_const_int32(gallivm, 0);

   undef_src_val = lp_build_undef(gallivm, fs_type);

   row_type.length = fs_type.length;
   vector_width = dst_type.floating ? lp_native_vector_width : lp_integer_vector_width;

   /* Compute correct swizzle and count channels */
   memset(swizzle, LP_BLD_SWIZZLE_DONTCARE, TGSI_NUM_CHANNELS);
   dst_channels = 0;

   for (i = 0; i < TGSI_NUM_CHANNELS; ++i) {
      /* Ensure channel is used */
      if (out_format_desc->swizzle[i] >= TGSI_NUM_CHANNELS) {
         continue;
      }

      /* Ensure not already written to (happens in case with GL_ALPHA) */
      if (swizzle[out_format_desc->swizzle[i]] < TGSI_NUM_CHANNELS) {
         continue;
      }

      /* Ensure we haven't already found all channels */
      if (dst_channels >= out_format_desc->nr_channels) {
         continue;
      }

      swizzle[out_format_desc->swizzle[i]] = i;
      ++dst_channels;

      if (i == alpha_channel) {
         has_alpha = true;
      }
   }

   if (format_expands_to_float_soa(out_format_desc)) {
      /*
       * the code above can't work for layout_other
       * for srgb it would sort of work but we short-circuit swizzles, etc.
       * as that is done as part of unpack / pack.
       */
      dst_channels = 4; /* HACK: this is fake 4 really but need it due to transpose stuff later */
      has_alpha = true;
      swizzle[0] = 0;
      swizzle[1] = 1;
      swizzle[2] = 2;
      swizzle[3] = 3;
      pad_inline = true; /* HACK: prevent rgbxrgbx->rgbrgbxx conversion later */
   }

   /* If 3 channels then pad to include alpha for 4 element transpose */
   if (dst_channels == 3 && !has_alpha) {
      for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
         if (swizzle[i] > TGSI_NUM_CHANNELS)
            swizzle[i] = 3;
      }
      if (out_format_desc->nr_channels == 4) {
         dst_channels = 4;
      }
   }

   /*
    * Load shader output
    */
   for (i = 0; i < num_fullblock_fs; ++i) {
      /* Always load alpha for use in blending */
      LLVMValueRef alpha;
      if (i < num_fs) {
         alpha = LLVMBuildLoad(builder, fs_out_color[rt][alpha_channel][i], "");
      }
      else {
         alpha = undef_src_val;
      }

      /* Load each channel */
      for (j = 0; j < dst_channels; ++j) {
         assert(swizzle[j] < 4);
         if (i < num_fs) {
            fs_src[i][j] = LLVMBuildLoad(builder, fs_out_color[rt][swizzle[j]][i], "");
         }
         else {
            fs_src[i][j] = undef_src_val;
         }
      }

      /* If 3 channels then pad to include alpha for 4 element transpose */
      /*
       * XXX If we include that here maybe could actually use it instead of
       * separate alpha for blending?
       */
      if (dst_channels == 3 && !has_alpha) {
         fs_src[i][3] = alpha;
      }

      /* We split the row_mask and row_alpha as we want 128bit interleave */
      if (fs_type.length == 8) {
         src_mask[i*2 + 0] = lp_build_extract_range(gallivm, fs_mask[i], 0, src_channels);
         src_mask[i*2 + 1] = lp_build_extract_range(gallivm, fs_mask[i], src_channels, src_channels);

         src_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
         src_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha, src_channels, src_channels);
      }
      else {
         src_mask[i] = fs_mask[i];
         src_alpha[i] = alpha;
      }
   }
   if (dual_source_blend) {
      /* same as above except different src/dst, skip masks and comments... */
      for (i = 0; i < num_fullblock_fs; ++i) {
         LLVMValueRef alpha;
         if (i < num_fs) {
            alpha = LLVMBuildLoad(builder, fs_out_color[1][alpha_channel][i], "");
         }
         else {
            alpha = undef_src_val;
         }

         for (j = 0; j < dst_channels; ++j) {
            assert(swizzle[j] < 4);
            if (i < num_fs) {
               fs_src1[i][j] = LLVMBuildLoad(builder, fs_out_color[1][swizzle[j]][i], "");
            }
            else {
               fs_src1[i][j] = undef_src_val;
            }
         }
         if (dst_channels == 3 && !has_alpha) {
            fs_src1[i][3] = alpha;
         }
         if (fs_type.length == 8) {
            src1_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
            src1_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha, src_channels, src_channels);
         }
         else {
            src1_alpha[i] = alpha;
         }
      }
   }

   if (util_format_is_pure_integer(out_format)) {
      /*
       * In this case fs_type was really ints or uints disguised as floats,
       * fix that up now.
       */
      fs_type.floating = 0;
      fs_type.sign = dst_type.sign;
      for (i = 0; i < num_fullblock_fs; ++i) {
         for (j = 0; j < dst_channels; ++j) {
            fs_src[i][j] = LLVMBuildBitCast(builder, fs_src[i][j],
                                            lp_build_vec_type(gallivm, fs_type), "");
         }
         if (dst_channels == 3 && !has_alpha) {
            fs_src[i][3] = LLVMBuildBitCast(builder, fs_src[i][3],
                                            lp_build_vec_type(gallivm, fs_type), "");
         }
      }
   }

   /*
    * Pixel twiddle from fragment shader order to memory order
    */
   src_count = generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs,
                                   dst_channels, fs_src, src, pad_inline);
   if (dual_source_blend) {
      generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs, dst_channels,
                          fs_src1, src1, pad_inline);
   }

   src_channels = dst_channels < 3 ? dst_channels : 4;
   if (src_count != num_fullblock_fs * src_channels) {
      unsigned ds = src_count / (num_fullblock_fs * src_channels);
      row_type.length /= ds;
      fs_type.length = row_type.length;
   }

   blend_type = row_type;
   mask_type.length = 4;

   /* Convert src to row_type */
   if (dual_source_blend) {
      struct lp_type old_row_type = row_type;
      lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
      src_count = lp_build_conv_auto(gallivm, fs_type, &old_row_type, src1, src_count, src1);
   }
   else {
      src_count = lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
   }

   /* If the rows are not an SSE vector, combine them to become SSE size! */
   if ((row_type.width * row_type.length) % 128) {
      unsigned bits = row_type.width * row_type.length;
      unsigned combined;

      assert(src_count >= (vector_width / bits));

      dst_count = src_count / (vector_width / bits);

      combined = lp_build_concat_n(gallivm, row_type, src, src_count, src, dst_count);
      if (dual_source_blend) {
         lp_build_concat_n(gallivm, row_type, src1, src_count, src1, dst_count);
      }

      row_type.length *= combined;
      src_count /= combined;

      bits = row_type.width * row_type.length;
      assert(bits == 128 || bits == 256);
   }

   /*
    * Blend Colour conversion
    */
   blend_color = lp_jit_context_f_blend_color(gallivm, context_ptr);
   blend_color = LLVMBuildPointerCast(builder, blend_color, LLVMPointerType(lp_build_vec_type(gallivm, fs_type), 0), "");
   blend_color = LLVMBuildLoad(builder, LLVMBuildGEP(builder, blend_color, &i32_zero, 1, ""), "");

   /* Convert */
   lp_build_conv(gallivm, fs_type, blend_type, &blend_color, 1, &blend_color, 1);

   if (out_format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
      /*
       * since blending is done with floats, there was no conversion.
       * However, the rules according to fixed point renderbuffers still
       * apply, that is we must clamp inputs to 0.0/1.0.
       * (This would apply to separate alpha conversion too but we currently
       * force has_alpha to be true.)
       * TODO: should skip this with "fake" blend, since post-blend conversion
       * will clamp anyway.
       * TODO: could also skip this if fragment color clamping is enabled. We
       * don't support it natively so it gets baked into the shader however, so
       * can't really tell here.
       */
      struct lp_build_context f32_bld;
      assert(row_type.floating);
      lp_build_context_init(&f32_bld, gallivm, row_type);
      for (i = 0; i < src_count; i++) {
         src[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src[i]);
      }
      if (dual_source_blend) {
         for (i = 0; i < src_count; i++) {
            src1[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src1[i]);
         }
      }
      /* probably can't be different than row_type but better safe than sorry... */
      lp_build_context_init(&f32_bld, gallivm, blend_type);
      blend_color = lp_build_clamp(&f32_bld, blend_color, f32_bld.zero, f32_bld.one);
   }

   /* Extract alpha */
   blend_alpha = lp_build_extract_broadcast(gallivm, blend_type, row_type, blend_color, lp_build_const_int32(gallivm, 3));

   /* Swizzle to appropriate channels, e.g. from RGBA to BGRA BGRA */
   pad_inline &= (dst_channels * (block_size / src_count) * row_type.width) != vector_width;
   if (pad_inline) {
      /* Use all 4 channels e.g. from RGBA RGBA to RGxx RGxx */
      blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, TGSI_NUM_CHANNELS, row_type.length);
   } else {
      /* Only use dst_channels e.g. RGBA RGBA to RG RG xxxx */
      blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, dst_channels, row_type.length);
   }

   /*
    * Mask conversion
    */
   lp_bld_quad_twiddle(gallivm, mask_type, &src_mask[0], block_height, &src_mask[0]);

   if (src_count < block_height) {
      lp_build_concat_n(gallivm, mask_type, src_mask, 4, src_mask, src_count);
   } else if (src_count > block_height) {
      for (i = src_count; i > 0; --i) {
         unsigned pixels = block_size / src_count;
         unsigned idx = i - 1;

         src_mask[idx] = lp_build_extract_range(gallivm, src_mask[(idx * pixels) / 4],
                                                (idx * pixels) % 4, pixels);
      }
   }

   assert(mask_type.width == 32);

   for (i = 0; i < src_count; ++i) {
      unsigned pixels = block_size / src_count;
      unsigned pixel_width = row_type.width * dst_channels;

      if (pixel_width == 24) {
         mask_type.width = 8;
         mask_type.length = vector_width / mask_type.width;
      } else {
         mask_type.length = pixels;
         mask_type.width = row_type.width * dst_channels;

         src_mask[i] = LLVMBuildIntCast(builder, src_mask[i], lp_build_int_vec_type(gallivm, mask_type), "");

         mask_type.length *= dst_channels;
         mask_type.width /= dst_channels;
      }

      src_mask[i] = LLVMBuildBitCast(builder, src_mask[i], lp_build_int_vec_type(gallivm, mask_type), "");
      src_mask[i] = lp_build_pad_vector(gallivm, src_mask[i], row_type.length);
   }

   /*
    * Alpha conversion
    */
   if (!has_alpha) {
      struct lp_type alpha_type = fs_type;
      alpha_type.length = 4;
      convert_alpha(gallivm, row_type, alpha_type,
                    block_size, block_height,
                    src_count, dst_channels,
                    pad_inline, src_alpha);
      if (dual_source_blend) {
         convert_alpha(gallivm, row_type, alpha_type,
                       block_size, block_height,
                       src_count, dst_channels,
                       pad_inline, src1_alpha);
      }
   }

   /*
    * Load dst from memory
    */
   if (src_count < block_height) {
      dst_count = block_height;
   } else {
      dst_count = src_count;
   }

   dst_type.length *= block_size / dst_count;

   if (format_expands_to_float_soa(out_format_desc)) {
      /*
       * we need multiple values at once for the conversion, so can as well
       * load them vectorized here too instead of concatenating later.
       * (Still need concatenation later for 8-wide vectors).
       */
      dst_count = block_height;
      dst_type.length = block_width;
   }
   /*
    * Compute the alignment of the destination pointer in bytes.
    * We fetch 1-4 pixels; if the format has power-of-two alignment then
    * those fetches are always aligned by MIN2(16, fetch_width), except for
    * buffers (which are not 1d textures, but we can't distinguish them
    * here), so we need to stick with per-pixel alignment in that case.
    */
   if (is_1d) {
      dst_alignment = (out_format_desc->block.bits + 7) /
                      (out_format_desc->block.width * 8);
   }
   else {
      dst_alignment = dst_type.length * dst_type.width / 8;
   }
   /* Force power-of-two alignment by extracting only the least-significant bit */
   dst_alignment = 1 << (ffs(dst_alignment) - 1);
   /*
    * Resource base and stride pointers are aligned to 16 bytes, so that's
    * the maximum alignment we can guarantee.
    */
   dst_alignment = MIN2(16, dst_alignment);
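   /*
    * E.g. (assuming an RGB8 format with 4 pixels per row): the row is 12
    * bytes, ffs(12) - 1 == 2, so 1 << 2 rounds the alignment down to 4
    * bytes; a 32-byte row (8 x RGBA8) would be capped at 16 by the MIN2
    * above.
    */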
   if (is_1d) {
      load_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
                            dst, dst_type, dst_count / 4, dst_alignment);
      for (i = dst_count / 4; i < dst_count; i++) {
         dst[i] = lp_build_undef(gallivm, dst_type);
      }
   }
   else {
      load_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
                            dst, dst_type, dst_count, dst_alignment);
   }
   /*
    * Convert from dst/output format to src/blending format.
    *
    * This is necessary as we can only read 1 row from memory at a time,
    * so at this point dst_count is always at least 4.
    *
    * With, for example, an R8 format all 16 pixels fit in a 128-bit vector;
    * this will take the 4 dsts and combine them into 1 src so we can
    * perform blending on all 16 pixels in that single vector at once.
    */
   if (dst_count > src_count) {
      lp_build_concat_n(gallivm, dst_type, dst, 4, dst, src_count);
   }

   /*
    * Blending
    */
   /* XXX this is broken for RGB8 formats -
    * they get expanded from 12 to 16 elements (to include alpha)
    * by convert_to_blend_type then reduced to 15 instead of 12
    * by convert_from_blend_type (a simple fix though breaks A8...).
    * R16G16B16 also crashes, though differently; seemingly something
    * goes wrong inside llvm's handling of npot vector sizes.
    * It seems some cleanup could be done here (like skipping conversion/blend
    * when not needed).
    */
   convert_to_blend_type(gallivm, block_size, out_format_desc, dst_type,
                         row_type, dst, src_count);
   /*
    * FIXME: Really should get logic ops / masks out of generic blend / row
    * format. Logic ops will definitely not work on the blend float format
    * used for SRGB here, and I think OpenGL expects this to work as
    * specified (that is, incoming values converted to srgb, then the logic
    * op applied).
    */
   for (i = 0; i < src_count; ++i) {
      dst[i] = lp_build_blend_aos(gallivm,
                                  &variant->key.blend,
                                  out_format,
                                  row_type,
                                  rt,
                                  src[i],
                                  has_alpha ? NULL : src_alpha[i],
                                  src1[i],
                                  has_alpha ? NULL : src1_alpha[i],
                                  dst[i],
                                  partial_mask ? src_mask[i] : NULL,
                                  blend_color,
                                  has_alpha ? NULL : blend_alpha,
                                  swizzle,
                                  pad_inline ? 4 : dst_channels);
   }

   convert_from_blend_type(gallivm, block_size, out_format_desc,
                           row_type, dst_type, dst, src_count);
   /* Split the blend rows back to memory rows */
   if (dst_count > src_count) {
      row_type.length = dst_type.length * (dst_count / src_count);

      if (src_count == 1) {
         dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
         dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);

         row_type.length /= 2;
         src_count *= 2;
      }

      dst[3] = lp_build_extract_range(gallivm, dst[1], row_type.length / 2, row_type.length / 2);
      dst[2] = lp_build_extract_range(gallivm, dst[1], 0, row_type.length / 2);
      dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
      dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);

      row_type.length /= 2;
      src_count *= 2;
   }
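   /*
    * E.g. starting from a single 16 x i8 src vector (src_count == 1, all 16
    * R8 pixels blended at once): the first split yields two 8-element
    * halves, the second yields the four 4-element memory rows.  This is a
    * sketch of the common case; other format widths follow the same halving.
    */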
   /*
    * Store blend result to memory
    */
   if (is_1d) {
      store_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
                             dst, dst_type, dst_count / 4, dst_alignment);
   }
   else {
      store_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
                             dst, dst_type, dst_count, dst_alignment);
   }

   if (have_smallfloat_format(dst_type, out_format)) {
      lp_build_fpstate_set(gallivm, fpstate);
   }

   if (do_branch) {
      lp_build_mask_end(&mask_ctx);
   }
}
/**
 * Generate the runtime callable function for the whole fragment pipeline.
 * Note that the function which we generate operates on a block of 16
 * pixels at a time.  The block contains 2x2 quads.  Each quad contains
 * 2x2 pixels.
 */
static void
generate_fragment(struct llvmpipe_context *lp,
                  struct lp_fragment_shader *shader,
                  struct lp_fragment_shader_variant *variant,
                  unsigned partial_mask)
{
   struct gallivm_state *gallivm = variant->gallivm;
   const struct lp_fragment_shader_variant_key *key = &variant->key;
   struct lp_shader_input inputs[PIPE_MAX_SHADER_INPUTS];
   char func_name[64];
   struct lp_type fs_type;
   struct lp_type blend_type;
   LLVMTypeRef fs_elem_type;
   LLVMTypeRef blend_vec_type;
   LLVMTypeRef arg_types[13];
   LLVMTypeRef func_type;
   LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
   LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
   LLVMValueRef context_ptr;
   LLVMValueRef x;
   LLVMValueRef y;
   LLVMValueRef a0_ptr;
   LLVMValueRef dadx_ptr;
   LLVMValueRef dady_ptr;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef stride_ptr;
   LLVMValueRef depth_ptr;
   LLVMValueRef depth_stride;
   LLVMValueRef mask_input;
   LLVMValueRef thread_data_ptr;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_interp_soa_context interp;
   LLVMValueRef fs_mask[16 / 4];
   LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][16 / 4];
   LLVMValueRef function;
   LLVMValueRef facing;
   unsigned num_fs;
   unsigned i;
   unsigned chan;
   unsigned cbuf;
   boolean cbuf0_write_all;
   const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
                                     util_blend_state_is_dual(&key->blend, 0);

   assert(lp_native_vector_width / 32 >= 4);
   /* Adjust color input interpolation according to flatshade state:
    */
   memcpy(inputs, shader->inputs,
          shader->info.base.num_inputs * sizeof inputs[0]);
   for (i = 0; i < shader->info.base.num_inputs; i++) {
      if (inputs[i].interp == LP_INTERP_COLOR) {
         if (key->flatshade)
            inputs[i].interp = LP_INTERP_CONSTANT;
         else
            inputs[i].interp = LP_INTERP_PERSPECTIVE;
      }
   }

   /* check if writes to cbuf[0] are to be copied to all cbufs */
   cbuf0_write_all =
      shader->info.base.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
   /* TODO: actually pick these based on the fs and color buffer
    * characteristics. */

   memset(&fs_type, 0, sizeof fs_type);
   fs_type.floating = TRUE;      /* floating point values */
   fs_type.sign = TRUE;          /* values are signed */
   fs_type.norm = FALSE;         /* values are not limited to [0,1] or [-1,1] */
   fs_type.width = 32;           /* 32-bit float */
   fs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */

   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE;  /* values are integers */
   blend_type.sign = FALSE;      /* values are unsigned */
   blend_type.norm = TRUE;       /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;         /* 8-bit ubyte values */
   blend_type.length = 16;       /* 16 elements per vector */
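   /*
    * For illustration: with 256-bit AVX vectors (lp_native_vector_width ==
    * 256) fs_type describes 8 x f32, while blend_type stays 16 x unorm8 in
    * a 128-bit register -- exactly the width mismatch the conversion code
    * in generate_unswizzled_blend has to bridge.
    */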
   /*
    * Generate the function prototype. Any change here must be reflected in
    * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
    */

   fs_elem_type = lp_build_elem_type(gallivm, fs_type);

   blend_vec_type = lp_build_vec_type(gallivm, blend_type);

   util_snprintf(func_name, sizeof(func_name), "fs%u_variant%u_%s",
                 shader->no, variant->no, partial_mask ? "partial" : "whole");

   arg_types[0] = variant->jit_context_ptr_type;       /* context */
   arg_types[1] = int32_type;                          /* x */
   arg_types[2] = int32_type;                          /* y */
   arg_types[3] = int32_type;                          /* facing */
   arg_types[4] = LLVMPointerType(fs_elem_type, 0);    /* a0 */
   arg_types[5] = LLVMPointerType(fs_elem_type, 0);    /* dadx */
   arg_types[6] = LLVMPointerType(fs_elem_type, 0);    /* dady */
   arg_types[7] = LLVMPointerType(LLVMPointerType(blend_vec_type, 0), 0);  /* color */
   arg_types[8] = LLVMPointerType(int8_type, 0);       /* depth */
   arg_types[9] = int32_type;                          /* mask_input */
   arg_types[10] = variant->jit_thread_data_ptr_type;  /* per thread data */
   arg_types[11] = LLVMPointerType(int32_type, 0);     /* stride */
   arg_types[12] = int32_type;                         /* depth_stride */

   func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
                                arg_types, ARRAY_SIZE(arg_types), 0);
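   /*
    * For reference, the C-side counterpart of this prototype is the
    * lp_jit_frag_func pointer type in lp_jit.h.  Roughly (a sketch only --
    * the authoritative definition lives in lp_jit.h):
    *
    *    typedef void (*lp_jit_frag_func)(const struct lp_jit_context *context,
    *                                     uint32_t x, uint32_t y, uint32_t facing,
    *                                     const void *a0, const void *dadx,
    *                                     const void *dady, uint8_t **color,
    *                                     uint8_t *depth, uint32_t mask_input,
    *                                     struct lp_jit_thread_data *thread_data,
    *                                     uint32_t *stride, uint32_t depth_stride);
    */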
   function = LLVMAddFunction(gallivm->module, func_name, func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   variant->function[partial_mask] = function;

   /* XXX: need to propagate noalias down into color param now we are
    * passing a pointer-to-pointer?
    */
   for(i = 0; i < ARRAY_SIZE(arg_types); ++i)
      if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
         lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
   context_ptr  = LLVMGetParam(function, 0);
   x            = LLVMGetParam(function, 1);
   y            = LLVMGetParam(function, 2);
   facing       = LLVMGetParam(function, 3);
   a0_ptr       = LLVMGetParam(function, 4);
   dadx_ptr     = LLVMGetParam(function, 5);
   dady_ptr     = LLVMGetParam(function, 6);
   color_ptr_ptr = LLVMGetParam(function, 7);
   depth_ptr    = LLVMGetParam(function, 8);
   mask_input   = LLVMGetParam(function, 9);
   thread_data_ptr = LLVMGetParam(function, 10);
   stride_ptr   = LLVMGetParam(function, 11);
   depth_stride = LLVMGetParam(function, 12);

   lp_build_name(context_ptr, "context");
   lp_build_name(x, "x");
   lp_build_name(y, "y");
   lp_build_name(a0_ptr, "a0");
   lp_build_name(dadx_ptr, "dadx");
   lp_build_name(dady_ptr, "dady");
   lp_build_name(color_ptr_ptr, "color_ptr_ptr");
   lp_build_name(depth_ptr, "depth");
   lp_build_name(mask_input, "mask_input");
   lp_build_name(thread_data_ptr, "thread_data");
   lp_build_name(stride_ptr, "stride_ptr");
   lp_build_name(depth_stride, "depth_stride");
   /*
    * Function body
    */

   block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
   builder = gallivm->builder;
   assert(builder);
   LLVMPositionBuilderAtEnd(builder, block);

   /* code generated texture sampling */
   sampler = lp_llvm_sampler_soa_create(key->state);
   num_fs = 16 / fs_type.length; /* number of loops per 4x4 stamp */
   /* for 1d resources only run "upper half" of stamp */
   if (key->resource_1d)
      num_fs /= 2;
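   /*
    * E.g. with 8-wide SIMD (fs_type.length == 8) the 16-pixel stamp takes
    * num_fs == 2 iterations of the shader loop; with 4-wide SIMD it takes 4.
    */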
   {
      LLVMValueRef num_loop = lp_build_const_int32(gallivm, num_fs);
      LLVMTypeRef mask_type = lp_build_int_vec_type(gallivm, fs_type);
      LLVMValueRef mask_store = lp_build_array_alloca(gallivm, mask_type,
                                                      num_loop, "mask_store");
      LLVMValueRef color_store[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS];
      boolean pixel_center_integer =
         shader->info.base.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER];
      /*
       * The shader input interpolation info is not explicitly baked in the
       * shader key, but everything it derives from (TGSI, and flatshade) is
       * already included in the shader key.
       */
      lp_build_interp_soa_init(&interp,
                               gallivm,
                               shader->info.base.num_inputs,
                               inputs,
                               pixel_center_integer,
                               key->depth_clamp,
                               builder, fs_type,
                               a0_ptr, dadx_ptr, dady_ptr,
                               x, y);
      for (i = 0; i < num_fs; i++) {
         LLVMValueRef mask;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
         LLVMValueRef mask_ptr = LLVMBuildGEP(builder, mask_store,
                                              &indexi, 1, "mask_ptr");

         if (partial_mask) {
            mask = generate_quad_mask(gallivm, fs_type,
                                      i * fs_type.length / 4, mask_input);
         }
         else {
            mask = lp_build_const_int_vec(gallivm, fs_type, ~0);
         }
         LLVMBuildStore(builder, mask, mask_ptr);
      }
      generate_fs_loop(gallivm,
                       shader, key,
                       builder,
                       fs_type,
                       context_ptr,
                       num_loop,
                       &interp,
                       sampler,
                       mask_store, /* output */
                       color_store,
                       depth_ptr,
                       depth_stride,
                       facing,
                       thread_data_ptr);
      for (i = 0; i < num_fs; i++) {
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
         LLVMValueRef ptr = LLVMBuildGEP(builder, mask_store,
                                         &indexi, 1, "");
         fs_mask[i] = LLVMBuildLoad(builder, ptr, "mask");
         /* This is messy; things here need reorganizing. */
         for (cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
            for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
               ptr = LLVMBuildGEP(builder,
                                  color_store[cbuf * !cbuf0_write_all][chan],
                                  &indexi, 1, "");
               fs_out_color[cbuf][chan][i] = ptr;
            }
         }
         if (dual_source_blend) {
            /* only support one dual source blend target hence always use output 1 */
            for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
               ptr = LLVMBuildGEP(builder,
                                  color_store[1][chan],
                                  &indexi, 1, "");
               fs_out_color[1][chan][i] = ptr;
            }
         }
      }
      sampler->destroy(sampler);
      /* Loop over color outputs / color buffers to do blending.
       */
      for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
         if (key->cbuf_format[cbuf] != PIPE_FORMAT_NONE) {
            LLVMValueRef color_ptr;
            LLVMValueRef stride;
            LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);

            boolean do_branch = ((key->depth.enabled
                                  || key->stencil[0].enabled
                                  || key->alpha.enabled)
                                 && !shader->info.base.uses_kill);

            color_ptr = LLVMBuildLoad(builder,
                                      LLVMBuildGEP(builder, color_ptr_ptr,
                                                   &index, 1, ""),
                                      "");

            lp_build_name(color_ptr, "color_ptr%d", cbuf);

            stride = LLVMBuildLoad(builder,
                                   LLVMBuildGEP(builder, stride_ptr, &index, 1, ""),
                                   "");

            generate_unswizzled_blend(gallivm, cbuf, variant,
                                      key->cbuf_format[cbuf],
                                      num_fs, fs_type, fs_mask, fs_out_color,
                                      context_ptr, color_ptr, stride,
                                      partial_mask, do_branch);
         }
      }
   }

   LLVMBuildRetVoid(builder);

   gallivm_verify_function(gallivm, function);
}
static void
dump_fs_variant_key(const struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   debug_printf("fs variant %p:\n", (void *) key);
   if (key->flatshade) {
      debug_printf("flatshade = 1\n");
   }
   for (i = 0; i < key->nr_cbufs; ++i) {
      debug_printf("cbuf_format[%u] = %s\n", i,
                   util_format_name(key->cbuf_format[i]));
   }
   if (key->depth.enabled || key->stencil[0].enabled) {
      debug_printf("depth.format = %s\n",
                   util_format_name(key->zsbuf_format));
   }
   if (key->depth.enabled) {
      debug_printf("depth.func = %s\n",
                   util_dump_func(key->depth.func, TRUE));
      debug_printf("depth.writemask = %u\n", key->depth.writemask);
   }
   for (i = 0; i < 2; ++i) {
      if (key->stencil[i].enabled) {
         debug_printf("stencil[%u].func = %s\n", i,
                      util_dump_func(key->stencil[i].func, TRUE));
         debug_printf("stencil[%u].fail_op = %s\n", i,
                      util_dump_stencil_op(key->stencil[i].fail_op, TRUE));
         debug_printf("stencil[%u].zpass_op = %s\n", i,
                      util_dump_stencil_op(key->stencil[i].zpass_op, TRUE));
         debug_printf("stencil[%u].zfail_op = %s\n", i,
                      util_dump_stencil_op(key->stencil[i].zfail_op, TRUE));
         debug_printf("stencil[%u].valuemask = 0x%x\n", i,
                      key->stencil[i].valuemask);
         debug_printf("stencil[%u].writemask = 0x%x\n", i,
                      key->stencil[i].writemask);
      }
   }
   if (key->alpha.enabled) {
      debug_printf("alpha.func = %s\n",
                   util_dump_func(key->alpha.func, TRUE));
   }

   if (key->occlusion_count) {
      debug_printf("occlusion_count = 1\n");
   }

   if (key->blend.logicop_enable) {
      debug_printf("blend.logicop_func = %s\n",
                   util_dump_logicop(key->blend.logicop_func, TRUE));
   }
   else if (key->blend.rt[0].blend_enable) {
      debug_printf("blend.rgb_func = %s\n",
                   util_dump_blend_func(key->blend.rt[0].rgb_func, TRUE));
      debug_printf("blend.rgb_src_factor = %s\n",
                   util_dump_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
      debug_printf("blend.rgb_dst_factor = %s\n",
                   util_dump_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
      debug_printf("blend.alpha_func = %s\n",
                   util_dump_blend_func(key->blend.rt[0].alpha_func, TRUE));
      debug_printf("blend.alpha_src_factor = %s\n",
                   util_dump_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
      debug_printf("blend.alpha_dst_factor = %s\n",
                   util_dump_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
   }
   debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
   if (key->blend.alpha_to_coverage) {
      debug_printf("blend.alpha_to_coverage is enabled\n");
   }
   for (i = 0; i < key->nr_samplers; ++i) {
      const struct lp_static_sampler_state *sampler = &key->state[i].sampler_state;
      debug_printf("sampler[%u] = \n", i);
      debug_printf("  .wrap = %s %s %s\n",
                   util_dump_tex_wrap(sampler->wrap_s, TRUE),
                   util_dump_tex_wrap(sampler->wrap_t, TRUE),
                   util_dump_tex_wrap(sampler->wrap_r, TRUE));
      debug_printf("  .min_img_filter = %s\n",
                   util_dump_tex_filter(sampler->min_img_filter, TRUE));
      debug_printf("  .min_mip_filter = %s\n",
                   util_dump_tex_mipfilter(sampler->min_mip_filter, TRUE));
      debug_printf("  .mag_img_filter = %s\n",
                   util_dump_tex_filter(sampler->mag_img_filter, TRUE));
      if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
         debug_printf("  .compare_func = %s\n",
                      util_dump_func(sampler->compare_func, TRUE));
      debug_printf("  .normalized_coords = %u\n", sampler->normalized_coords);
      debug_printf("  .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
      debug_printf("  .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
      debug_printf("  .apply_min_lod = %u\n", sampler->apply_min_lod);
      debug_printf("  .apply_max_lod = %u\n", sampler->apply_max_lod);
   }
   for (i = 0; i < key->nr_sampler_views; ++i) {
      const struct lp_static_texture_state *texture = &key->state[i].texture_state;
      debug_printf("texture[%u] = \n", i);
      debug_printf("  .format = %s\n",
                   util_format_name(texture->format));
      debug_printf("  .target = %s\n",
                   util_dump_tex_target(texture->target, TRUE));
      debug_printf("  .level_zero_only = %u\n",
                   texture->level_zero_only);
      debug_printf("  .pot = %u %u %u\n",
                   texture->pot_width,
                   texture->pot_height,
                   texture->pot_depth);
   }
}
void
lp_debug_fs_variant(const struct lp_fragment_shader_variant *variant)
{
   debug_printf("llvmpipe: Fragment shader #%u variant #%u:\n",
                variant->shader->no, variant->no);
   tgsi_dump(variant->shader->base.tokens, 0);
   dump_fs_variant_key(&variant->key);
   debug_printf("variant->opaque = %u\n", variant->opaque);
}
/**
 * Generate a new fragment shader variant from the shader code and
 * other state indicated by the key.
 */
static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key)
{
   struct lp_fragment_shader_variant *variant;
   const struct util_format_description *cbuf0_format_desc;
   boolean fullcolormask;
   char module_name[64];

   variant = CALLOC_STRUCT(lp_fragment_shader_variant);
   if (!variant)
      return NULL;
   util_snprintf(module_name, sizeof(module_name), "fs%u_variant%u",
                 shader->no, shader->variants_created);

   variant->gallivm = gallivm_create(module_name, lp->context);
   if (!variant->gallivm) {
      FREE(variant);
      return NULL;
   }

   variant->shader = shader;
   variant->list_item_global.base = variant;
   variant->list_item_local.base = variant;
   variant->no = shader->variants_created++;

   memcpy(&variant->key, key, shader->variant_key_size);
   /*
    * Determine whether we are touching all channels in the color buffer.
    */
   fullcolormask = FALSE;
   if (key->nr_cbufs == 1) {
      cbuf0_format_desc = util_format_description(key->cbuf_format[0]);
      fullcolormask = util_format_colormask_full(cbuf0_format_desc,
                                                 key->blend.rt[0].colormask);
   }

   variant->opaque =
         !key->blend.logicop_enable &&
         !key->blend.rt[0].blend_enable &&
         fullcolormask &&
         !key->stencil[0].enabled &&
         !key->alpha.enabled &&
         !key->blend.alpha_to_coverage &&
         !key->depth.enabled &&
         !shader->info.base.uses_kill
         ? TRUE : FALSE;
   if ((shader->info.base.num_tokens <= 1) &&
       !key->depth.enabled && !key->stencil[0].enabled) {
      variant->ps_inv_multiplier = 0;
   } else {
      variant->ps_inv_multiplier = 1;
   }

   if ((LP_DEBUG & DEBUG_FS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      lp_debug_fs_variant(variant);
   }
   lp_jit_init_types(variant);

   if (variant->jit_function[RAST_EDGE_TEST] == NULL)
      generate_fragment(lp, shader, variant, RAST_EDGE_TEST);

   if (variant->jit_function[RAST_WHOLE] == NULL) {
      if (variant->opaque) {
         /* Specialized shader, which doesn't need to read the color buffer. */
         generate_fragment(lp, shader, variant, RAST_WHOLE);
      }
   }

   /*
    * Compile everything
    */

   gallivm_compile_module(variant->gallivm);

   variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);

   if (variant->function[RAST_EDGE_TEST]) {
      variant->jit_function[RAST_EDGE_TEST] = (lp_jit_frag_func)
            gallivm_jit_function(variant->gallivm,
                                 variant->function[RAST_EDGE_TEST]);
   }

   if (variant->function[RAST_WHOLE]) {
      variant->jit_function[RAST_WHOLE] = (lp_jit_frag_func)
            gallivm_jit_function(variant->gallivm,
                                 variant->function[RAST_WHOLE]);
   } else if (!variant->jit_function[RAST_WHOLE]) {
      /* no specialized whole-tile shader was generated; fall back to the
       * edge-test variant */
      variant->jit_function[RAST_WHOLE] = variant->jit_function[RAST_EDGE_TEST];
   }

   gallivm_free_ir(variant->gallivm);

   return variant;
}
static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_fragment_shader *shader;
   int nr_samplers;
   int nr_sampler_views;
   int i;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   shader->no = fs_no++;
   make_empty_list(&shader->variants);

   /* get/save the summary info for this shader */
   lp_build_tgsi_info(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   shader->draw_data = draw_create_fragment_shader(llvmpipe->draw, templ);
   if (shader->draw_data == NULL) {
      FREE((void *) shader->base.tokens);
      FREE(shader);
      return NULL;
   }
   nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
   nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;

   shader->variant_key_size = Offset(struct lp_fragment_shader_variant_key,
                                     state[MAX2(nr_samplers, nr_sampler_views)]);
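   /*
    * The variant key is variable-size: it ends with one state entry per
    * sampler/view, so Offset() computes how many bytes of the struct are
    * actually in use.  make_variant_key() and the memcmp() in
    * llvmpipe_update_fs() rely on this size.
    */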
   for (i = 0; i < shader->info.base.num_inputs; i++) {
      shader->inputs[i].usage_mask = shader->info.base.input_usage_mask[i];
      shader->inputs[i].cyl_wrap = shader->info.base.input_cylindrical_wrap[i];

      switch (shader->info.base.input_interpolate[i]) {
      case TGSI_INTERPOLATE_CONSTANT:
         shader->inputs[i].interp = LP_INTERP_CONSTANT;
         break;
      case TGSI_INTERPOLATE_LINEAR:
         shader->inputs[i].interp = LP_INTERP_LINEAR;
         break;
      case TGSI_INTERPOLATE_PERSPECTIVE:
         shader->inputs[i].interp = LP_INTERP_PERSPECTIVE;
         break;
      case TGSI_INTERPOLATE_COLOR:
         shader->inputs[i].interp = LP_INTERP_COLOR;
         break;
      default:
         assert(0);
         break;
      }

      switch (shader->info.base.input_semantic_name[i]) {
      case TGSI_SEMANTIC_FACE:
         shader->inputs[i].interp = LP_INTERP_FACING;
         break;
      case TGSI_SEMANTIC_POSITION:
         /* Position was already emitted above
          */
         shader->inputs[i].interp = LP_INTERP_POSITION;
         shader->inputs[i].src_index = 0;
         continue;
      }

      /* XXX this is a completely pointless index map... */
      shader->inputs[i].src_index = i+1;
   }
   if (LP_DEBUG & DEBUG_TGSI) {
      unsigned attrib;
      debug_printf("llvmpipe: Create fragment shader #%u %p:\n",
                   shader->no, (void *) shader);
      tgsi_dump(templ->tokens, 0);
      debug_printf("usage masks:\n");
      for (attrib = 0; attrib < shader->info.base.num_inputs; ++attrib) {
         unsigned usage_mask = shader->info.base.input_usage_mask[attrib];
         debug_printf("  IN[%u].%s%s%s%s\n",
                      attrib,
                      usage_mask & TGSI_WRITEMASK_X ? "x" : "",
                      usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
                      usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
                      usage_mask & TGSI_WRITEMASK_W ? "w" : "");
      }
      debug_printf("\n");
   }

   return shader;
}
static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->fs == fs)
      return;

   llvmpipe->fs = (struct lp_fragment_shader *) fs;

   draw_bind_fragment_shader(llvmpipe->draw,
                             (llvmpipe->fs ? llvmpipe->fs->draw_data : NULL));

   llvmpipe->dirty |= LP_NEW_FS;
}
/**
 * Remove shader variant from two lists: the shader's variant list
 * and the context's variant list.
 */
void
llvmpipe_remove_shader_variant(struct llvmpipe_context *lp,
                               struct lp_fragment_shader_variant *variant)
{
   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      debug_printf("llvmpipe: del fs #%u var #%u v created #%u v cached"
                   " #%u v total cached #%u\n",
                   variant->shader->no,
                   variant->no,
                   variant->shader->variants_created,
                   variant->shader->variants_cached,
                   lp->nr_fs_variants);
   }

   gallivm_destroy(variant->gallivm);

   /* remove from shader's list */
   remove_from_list(&variant->list_item_local);
   variant->shader->variants_cached--;

   /* remove from context's list */
   remove_from_list(&variant->list_item_global);
   lp->nr_fs_variants--;
   lp->nr_fs_instrs -= variant->nr_instrs;

   FREE(variant);
}
static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_fragment_shader *shader = fs;
   struct lp_fs_variant_list_item *li;

   assert(fs != llvmpipe->fs);

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders, as they may still be binned.
    * Flushing alone might not be sufficient; we need to wait on it too.
    */
   llvmpipe_finish(pipe, __FUNCTION__);

   /* Delete all the variants */
   li = first_elem(&shader->variants);
   while(!at_end(&shader->variants, li)) {
      struct lp_fs_variant_list_item *next = next_elem(li);
      llvmpipe_remove_shader_variant(llvmpipe, li->base);
      li = next;
   }

   /* Delete draw module's data */
   draw_delete_fragment_shader(llvmpipe->draw, shader->draw_data);

   assert(shader->variants_cached == 0);
   FREE((void *) shader->base.tokens);
   FREE(shader);
}
static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             const struct pipe_constant_buffer *cb)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct pipe_resource *constants = cb ? cb->buffer : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index < ARRAY_SIZE(llvmpipe->constants[shader]));

   /* note: reference counting */
   util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb);

   if (constants) {
      if (!(constants->bind & PIPE_BIND_CONSTANT_BUFFER)) {
         debug_printf("Illegal set constant without bind flag\n");
         constants->bind |= PIPE_BIND_CONSTANT_BUFFER;
      }
   }

   if (shader == PIPE_SHADER_VERTEX ||
       shader == PIPE_SHADER_GEOMETRY) {
      /* Pass the constants to the 'draw' module */
      const unsigned size = cb ? cb->buffer_size : 0;
      const ubyte *data;

      if (constants) {
         data = (ubyte *) llvmpipe_resource_data(constants);
      }
      else if (cb && cb->user_buffer) {
         data = (ubyte *) cb->user_buffer;
      }
      else {
         data = NULL;
      }

      if (data)
         data += cb->buffer_offset;

      draw_set_mapped_constant_buffer(llvmpipe->draw, shader,
                                      index, data, size);
   }
   else {
      llvmpipe->dirty |= LP_NEW_FS_CONSTANTS;
   }

   if (cb && cb->user_buffer) {
      pipe_resource_reference(&constants, NULL);
   }
}
/**
 * Return the blend factor equivalent to a destination alpha of one.
 */
static inline unsigned
force_dst_alpha_one(unsigned factor, boolean clamped_zero)
{
   switch(factor) {
   case PIPE_BLENDFACTOR_DST_ALPHA:
      return PIPE_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_INV_DST_ALPHA:
      return PIPE_BLENDFACTOR_ZERO;
   case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
      if (clamped_zero)
         return PIPE_BLENDFACTOR_ZERO;
      else
         return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
   }

   return factor;
}
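/*
 * E.g. blending into an RGBX surface with factor INV_DST_ALPHA: the missing
 * destination alpha reads as 1.0, so 1 - Ad is identically zero and the
 * factor can be folded to PIPE_BLENDFACTOR_ZERO at code generation time.
 */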
/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   memset(key, 0, shader->variant_key_size);
   if (lp->framebuffer.zsbuf) {
      enum pipe_format zsbuf_format = lp->framebuffer.zsbuf->format;
      const struct util_format_description *zsbuf_desc =
         util_format_description(zsbuf_format);

      if (lp->depth_stencil->depth.enabled &&
          util_format_has_depth(zsbuf_desc)) {
         key->zsbuf_format = zsbuf_format;
         memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
      }
      if (lp->depth_stencil->stencil[0].enabled &&
          util_format_has_stencil(zsbuf_desc)) {
         key->zsbuf_format = zsbuf_format;
         memcpy(&key->stencil, &lp->depth_stencil->stencil, sizeof key->stencil);
      }
      if (llvmpipe_resource_is_1d(lp->framebuffer.zsbuf->texture)) {
         key->resource_1d = TRUE;
      }
   }
   /*
    * Propagate the depth clamp setting from the rasterizer state.
    * depth_clip == 0 implies depth clamping is enabled.
    *
    * When clip_halfz is enabled, always clamp the depth values.
    *
    * XXX: This is incorrect for GL, but correct for d3d10 (depth
    * clamp is always active in d3d10, regardless of whether depth
    * clip is enabled or not).
    * (GL has an always-on [0,1] clamp on fs depth output instead
    * to ensure the depth values stay in range. Doesn't look like
    * we do that, though...)
    */
   if (lp->rasterizer->clip_halfz) {
      key->depth_clamp = 1;
   } else {
      key->depth_clamp = (lp->rasterizer->depth_clip == 0) ? 1 : 0;
   }
   /* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
   if (!lp->framebuffer.nr_cbufs ||
       !lp->framebuffer.cbufs[0] ||
       !util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
      key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   }
   if(key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;
   if (lp->active_occlusion_queries) {
      key->occlusion_count = TRUE;
   }
   if (lp->framebuffer.nr_cbufs) {
      memcpy(&key->blend, lp->blend, sizeof key->blend);
   }

   key->nr_cbufs = lp->framebuffer.nr_cbufs;

   if (!key->blend.independent_blend_enable) {
      /* we always need independent blend otherwise the fixups below won't work */
      for (i = 1; i < key->nr_cbufs; i++) {
         memcpy(&key->blend.rt[i], &key->blend.rt[0],
                sizeof(key->blend.rt[0]));
      }
      key->blend.independent_blend_enable = 1;
   }
   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];

      if (lp->framebuffer.cbufs[i]) {
         enum pipe_format format = lp->framebuffer.cbufs[i]->format;
         const struct util_format_description *format_desc;

         key->cbuf_format[i] = format;

         /*
          * Figure out if this is a 1d resource. Note that OpenGL allows crazy
          * mixing of 2d textures with height 1 and 1d textures, so make sure
          * we pick 1d if any cbuf or zsbuf is 1d.
          */
         if (llvmpipe_resource_is_1d(lp->framebuffer.cbufs[i]->texture)) {
            key->resource_1d = TRUE;
         }

         format_desc = util_format_description(format);
         assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
                format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

         /*
          * Mask out color channels not present in the color buffer.
          */
         blend_rt->colormask &= util_format_colormask(format_desc);

         /*
          * Disable blend for integer formats.
          */
         if (util_format_is_pure_integer(format)) {
            blend_rt->blend_enable = 0;
         }

         /*
          * Our swizzled render tiles always have an alpha channel, but the
          * linear render target format often does not, so force the dst
          * alpha to one here.
          *
          * This is not a mere optimization. Wrong results will be produced if
          * the dst alpha is used, the dst format does not have alpha, and the
          * previous rendering was not flushed from the swizzled to linear
          * buffer. For example, NonPowTwo DCT.
          *
          * TODO: This should be generalized to all channels for better
          * performance, but only alpha causes correctness issues.
          *
          * Also, force the rgb/alpha funcs/factors to match, to make AoS
          * blending easier.
          */
         if (format_desc->swizzle[3] > PIPE_SWIZZLE_W ||
             format_desc->swizzle[3] == format_desc->swizzle[0]) {
            /* Doesn't cover mixed snorm/unorm but can't render to them anyway */
            boolean clamped_zero = !util_format_is_float(format) &&
                                   !util_format_is_snorm(format);
            blend_rt->rgb_src_factor =
               force_dst_alpha_one(blend_rt->rgb_src_factor, clamped_zero);
            blend_rt->rgb_dst_factor =
               force_dst_alpha_one(blend_rt->rgb_dst_factor, clamped_zero);
            blend_rt->alpha_func = blend_rt->rgb_func;
            blend_rt->alpha_src_factor = blend_rt->rgb_src_factor;
            blend_rt->alpha_dst_factor = blend_rt->rgb_dst_factor;
         }
      }
      else {
         /* no color buffer for this fragment output */
         key->cbuf_format[i] = PIPE_FORMAT_NONE;
         blend_rt->colormask = 0x0;
         blend_rt->blend_enable = 0;
      }
   }
   /* This value will be the same for all the variants of a given shader:
    */
   key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
   for(i = 0; i < key->nr_samplers; ++i) {
      if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
         lp_sampler_static_sampler_state(&key->state[i].sampler_state,
                                         lp->samplers[PIPE_SHADER_FRAGMENT][i]);
      }
   }
   /*
    * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
    * are dx10-style? Can't really have mixed opcodes, at least not
    * if we want to skip the holes here (without rescanning tgsi).
    */
   if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
      key->nr_sampler_views =
         shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
      for(i = 0; i < key->nr_sampler_views; ++i) {
         if(shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1 << i)) {
            lp_sampler_static_texture_state(&key->state[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
         }
      }
   }
   else {
      key->nr_sampler_views = key->nr_samplers;
      for(i = 0; i < key->nr_sampler_views; ++i) {
         if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
            lp_sampler_static_texture_state(&key->state[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
         }
      }
   }
}
/**
 * Update fragment shader state. This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant = NULL;
   struct lp_fs_variant_list_item *li;

   make_variant_key(lp, shader, &key);
   /* Search the variants for one which matches the key */
   li = first_elem(&shader->variants);
   while(!at_end(&shader->variants, li)) {
      if(memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* Move this variant to the head of the list to implement LRU
       * deletion of shaders when we have too many.
       */
      move_to_head(&lp->fs_variants_list, &variant->list_item_global);
   }
   else {
      /* variant not found, create it now */
      int64_t t0, t1, dt;
      unsigned i;
      unsigned variants_to_cull;

      if (LP_DEBUG & DEBUG_FS) {
         debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
                      lp->nr_fs_variants,
                      lp->nr_fs_instrs,
                      lp->nr_fs_variants ? lp->nr_fs_instrs / lp->nr_fs_variants : 0);
      }

      /* First, check if we've exceeded the max number of shader variants.
       * If so, free 25% of them (the least recently used ones).
       */
      variants_to_cull = lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS
                       ? LP_MAX_SHADER_VARIANTS / 4 : 0;
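      /*
       * E.g. (assuming LP_MAX_SHADER_VARIANTS == 1024, its value in
       * lp_limits.h): once 1024 variants exist, the 256 least recently
       * used ones are freed before the new variant is generated.
       */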
      if (variants_to_cull ||
          lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
         struct pipe_context *pipe = &lp->pipe;

         /*
          * XXX: we need to flush the context until we have some sort of
          * reference counting in fragment shaders, as they may still be
          * binned.  Flushing alone might not be sufficient; we need to
          * wait on it too.
          */
         llvmpipe_finish(pipe, __FUNCTION__);

         /*
          * We need to re-check lp->nr_fs_variants because an arbitrarily
          * large number of shader variants (potentially all of them) could
          * be pending for destruction on flush.
          */
         for (i = 0; i < variants_to_cull ||
                 lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
            struct lp_fs_variant_list_item *item;
            if (is_empty_list(&lp->fs_variants_list)) {
               break;
            }
            item = last_elem(&lp->fs_variants_list);
            assert(item);
            assert(item->base);
            llvmpipe_remove_shader_variant(lp, item->base);
         }
      }
      /*
       * Generate the new variant.
       */
      t0 = os_time_get();
      variant = generate_variant(lp, shader, &key);
      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      /* Put the new variant into the list */
      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
         lp->nr_fs_variants++;
         lp->nr_fs_instrs += variant->nr_instrs;
         shader->variants_cached++;
      }
   }

   /* Bind this variant */
   lp_setup_set_fs_variant(lp->setup, variant);
}
void
llvmpipe_init_fs_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
   llvmpipe->pipe.bind_fs_state   = llvmpipe_bind_fs_state;
   llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;

   llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
}
/**
 * Rasterization is disabled if there is no pixel shader and
 * both depth and stencil testing are disabled:
 * http://msdn.microsoft.com/en-us/library/windows/desktop/bb205125
 */
boolean
llvmpipe_rasterization_disabled(struct llvmpipe_context *lp)
{
   boolean null_fs = !lp->fs || lp->fs->info.base.num_tokens <= 1;

   return (null_fs &&
           !lp->depth_stencil->depth.enabled &&
           !lp->depth_stencil->stencil[0].enabled);
}