/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Code generate the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 * - triangle edge in/out testing
 * - depth/stencil test
 *
 * This file has only the glue to assemble the fragment pipeline.  The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, and in a completely generic and reusable way.  Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different precisions and data types.  For example, the fragment
 * shader typically needs to be done in floats, but the depth/stencil test and
 * blending are better done in the type that most closely matches the
 * depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply different numbers of elements, so we
 * must code generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
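
/*
 * Illustrative example (derived from generate_fragment() below): with 128-bit
 * SIMD registers the fragment shader runs on vectors of 4 x float32 (one 2x2
 * quad at a time), whereas blending runs on vectors of 16 x unorm8.  The
 * outputs of four shader invocations (4 quads = 16 pixels) are therefore
 * converted and packed to feed a single blending invocation -- see fs_type,
 * blend_type and num_fs below.
 */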
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pointer.h"
#include "util/u_format.h"
#include "util/u_dump.h"
#include "os/os_time.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_conv.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"

#include "lp_bld_alpha.h"
#include "lp_bld_blend.h"
#include "lp_bld_depth.h"
#include "lp_bld_interp.h"
#include "lp_context.h"
#include "lp_screen.h"
#include "lp_tex_sample.h"

#include <llvm-c/Analysis.h>
/**
 * Generate the depth/stencil test code.
 */
static void
generate_depth_stencil(LLVMBuilderRef builder,
                       const struct lp_fragment_shader_variant_key *key,
                       struct lp_type src_type,
                       struct lp_build_mask_context *mask,
                       LLVMValueRef stencil_refs[2],
                       LLVMValueRef src,
                       LLVMValueRef dst_ptr,
                       LLVMValueRef facing,
                       LLVMValueRef counter)
{
   const struct util_format_description *format_desc;
   struct lp_type dst_type;

   if (!key->depth.enabled && !key->stencil[0].enabled && !key->stencil[1].enabled)
      return;

   format_desc = util_format_description(key->zsbuf_format);

   /*
    * Depths are expected to be between 0 and 1, even if they are stored in
    * floats. Setting these bits here will ensure that the lp_build_conv() call
    * below won't try to unnecessarily clamp the incoming values.
    */
   if (src_type.floating) {
      src_type.sign = FALSE;
      src_type.norm = TRUE;
   }
   else {
      assert(!src_type.sign);
      assert(src_type.norm);
   }

   /* Pick the depth type. */
   dst_type = lp_depth_type(format_desc, src_type.width * src_type.length);

   /* FIXME: Cope with a depth test type with a different bit width. */
   assert(dst_type.width == src_type.width);
   assert(dst_type.length == src_type.length);

   /* Convert fragment Z from float to integer */
   lp_build_conv(builder, src_type, dst_type, &src, 1, &src, 1);

   dst_ptr = LLVMBuildBitCast(builder,
                              dst_ptr,
                              LLVMPointerType(lp_build_vec_type(dst_type), 0), "");

   lp_build_depth_stencil_test(builder,
                               &key->depth,
                               key->stencil,
                               dst_type,
                               format_desc,
                               mask,
                               stencil_refs,
                               src,
                               dst_ptr,
                               facing,
                               counter);
}

/**
 * Generate the code to do inside/outside triangle testing for the
 * four pixels in a 2x2 quad.  This will set the four elements of the
 * quad mask vector to 0 or ~0.
 * \param i  which quad of the quad group to test, in [0,3]
 */
static void
generate_tri_edge_mask(LLVMBuilderRef builder,
                       unsigned i,
                       LLVMValueRef *mask,      /* ivec4, out */
                       LLVMValueRef c0,         /* int32 */
                       LLVMValueRef c1,         /* int32 */
                       LLVMValueRef c2,         /* int32 */
                       LLVMValueRef step0_ptr,  /* ivec4 */
                       LLVMValueRef step1_ptr,  /* ivec4 */
                       LLVMValueRef step2_ptr)  /* ivec4 */
{
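   /*
    * A pixel is inside the triangle when its three edge-function values
    * (step0/1/2_ptr[i]) are all greater than the per-edge constants c0/c1/c2:
    * each comparison below yields ~0 for a passing pixel and the three
    * results are ANDed together into the quad mask.
    */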
#define OPTIMIZE_IN_OUT_TEST 0

#if OPTIMIZE_IN_OUT_TEST
   struct lp_build_if_state ifctx;
   LLVMValueRef not_draw_all;
#endif
   struct lp_build_flow_context *flow;
   struct lp_type i32_type;
   LLVMTypeRef i32vec4_type;
   LLVMValueRef c0_vec, c1_vec, c2_vec;
   LLVMValueRef in_out_mask;

   assert(i < 4);

   /* int32 vector type */
   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */

   i32vec4_type = lp_build_int32_vec4_type();

   /*
    * Use a conditional here to do detailed pixel in/out testing.
    * We only have to do this if c0 != INT_MIN.
    */
   flow = lp_build_flow_create(builder);
   lp_build_flow_scope_begin(flow);

   {
#if OPTIMIZE_IN_OUT_TEST
      /* not_draw_all = (c0 != INT_MIN) */
      not_draw_all = LLVMBuildICmp(builder,
                                   LLVMIntNE,
                                   c0,
                                   LLVMConstInt(LLVMInt32Type(), INT_MIN, 0),
                                   "");
#endif

      in_out_mask = lp_build_const_int_vec(i32_type, ~0);

      lp_build_flow_scope_declare(flow, &in_out_mask);

      /* if (not_draw_all) {... */
#if OPTIMIZE_IN_OUT_TEST
      lp_build_if(&ifctx, flow, builder, not_draw_all);
#endif
      {
         LLVMValueRef step0_vec, step1_vec, step2_vec;
         LLVMValueRef m0_vec, m1_vec, m2_vec;
         LLVMValueRef index, m;

         /* c0_vec = {c0, c0, c0, c0}
          * Note that we emit this code four times but LLVM optimizes away
          * three instances of it.
          */
         c0_vec = lp_build_broadcast(builder, i32vec4_type, c0);
         c1_vec = lp_build_broadcast(builder, i32vec4_type, c1);
         c2_vec = lp_build_broadcast(builder, i32vec4_type, c2);
         lp_build_name(c0_vec, "edgeconst0vec");
         lp_build_name(c1_vec, "edgeconst1vec");
         lp_build_name(c2_vec, "edgeconst2vec");

         /* load step0vec, step1, step2 vec from memory */
         index = LLVMConstInt(LLVMInt32Type(), i, 0);
         step0_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step0_ptr, &index, 1, ""), "");
         step1_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step1_ptr, &index, 1, ""), "");
         step2_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step2_ptr, &index, 1, ""), "");
         lp_build_name(step0_vec, "step0vec");
         lp_build_name(step1_vec, "step1vec");
         lp_build_name(step2_vec, "step2vec");

         /* m0_vec = step0_ptr[i] > c0_vec */
         m0_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step0_vec, c0_vec);
         m1_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step1_vec, c1_vec);
         m2_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step2_vec, c2_vec);

         /* in_out_mask = m0_vec & m1_vec & m2_vec */
         m = LLVMBuildAnd(builder, m0_vec, m1_vec, "");
         in_out_mask = LLVMBuildAnd(builder, m, m2_vec, "");
         lp_build_name(in_out_mask, "inoutmaskvec");
      }

#if OPTIMIZE_IN_OUT_TEST
      lp_build_endif(&ifctx);
#endif
   }

   lp_build_flow_scope_end(flow);
   lp_build_flow_destroy(flow);

   /* This is the initial alive/dead pixel mask for a quad of four pixels.
    * It's an int[4] vector with each word set to 0 or ~0.
    * Words will get cleared when pixels fail the Z test, etc.
    */
   *mask = in_out_mask;
}

static LLVMValueRef
generate_scissor_test(LLVMBuilderRef builder,
                      LLVMValueRef context_ptr,
                      const struct lp_build_interp_soa_context *interp,
                      struct lp_type type)
{
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef xpos = interp->pos[0], ypos = interp->pos[1];
   LLVMValueRef xmin, ymin, xmax, ymax;
   LLVMValueRef m0, m1, m2, m3, m;

   /* xpos, ypos contain the window coords for the four pixels in the quad */
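   /* Note: xmin/ymin are treated as inclusive and xmax/ymax as exclusive
    * bounds (GEQUAL vs. LESS comparisons below).
    */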
   /* get the current scissor bounds, convert to vectors */
   xmin = lp_jit_context_scissor_xmin_value(builder, context_ptr);
   xmin = lp_build_broadcast(builder, vec_type, xmin);

   ymin = lp_jit_context_scissor_ymin_value(builder, context_ptr);
   ymin = lp_build_broadcast(builder, vec_type, ymin);

   xmax = lp_jit_context_scissor_xmax_value(builder, context_ptr);
   xmax = lp_build_broadcast(builder, vec_type, xmax);

   ymax = lp_jit_context_scissor_ymax_value(builder, context_ptr);
   ymax = lp_build_broadcast(builder, vec_type, ymax);

   /* compare the fragment's position coordinates against the scissor bounds */
   m0 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, xpos, xmin);
   m1 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, ypos, ymin);
   m2 = lp_build_compare(builder, type, PIPE_FUNC_LESS, xpos, xmax);
   m3 = lp_build_compare(builder, type, PIPE_FUNC_LESS, ypos, ymax);

   /* AND all the masks together */
   m = LLVMBuildAnd(builder, m0, m1, "");
   m = LLVMBuildAnd(builder, m, m2, "");
   m = LLVMBuildAnd(builder, m, m3, "");

   lp_build_name(m, "scissormask");

   return m;
}

static LLVMValueRef
build_int32_vec_const(int value)
{
   struct lp_type i32_type;

   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */
   return lp_build_const_int_vec(i32_type, value);
}

/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 * \param i  which quad in the tile, in range [0,3]
 * \param do_tri_test  if 1, do triangle edge in/out testing
 */
static void
generate_fs(struct llvmpipe_context *lp,
            struct lp_fragment_shader *shader,
            const struct lp_fragment_shader_variant_key *key,
            LLVMBuilderRef builder,
            struct lp_type type,
            LLVMValueRef context_ptr,
            unsigned i,
            const struct lp_build_interp_soa_context *interp,
            struct lp_build_sampler_soa *sampler,
            LLVMValueRef *pmask,
            LLVMValueRef (*color)[4],
            LLVMValueRef depth_ptr,
            LLVMValueRef facing,
            unsigned do_tri_test,
            LLVMValueRef c0,
            LLVMValueRef c1,
            LLVMValueRef c2,
            LLVMValueRef step0_ptr,
            LLVMValueRef step1_ptr,
            LLVMValueRef step2_ptr,
            LLVMValueRef counter)
{
   const struct tgsi_token *tokens = shader->base.tokens;
   LLVMTypeRef vec_type;
   LLVMValueRef consts_ptr;
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][NUM_CHANNELS];
   LLVMValueRef z = interp->pos[2];
   LLVMValueRef stencil_refs[2];
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask;
   boolean early_depth_stencil_test;
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;

   stencil_refs[0] = lp_jit_context_stencil_ref_front_value(builder, context_ptr);
   stencil_refs[1] = lp_jit_context_stencil_ref_back_value(builder, context_ptr);

   vec_type = lp_build_vec_type(type);

   consts_ptr = lp_jit_context_constants(builder, context_ptr);

   flow = lp_build_flow_create(builder);

   memset(outputs, 0, sizeof outputs);

   lp_build_flow_scope_begin(flow);

   /* Declare the color and z variables */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         color[cbuf][chan] = LLVMGetUndef(vec_type);
         lp_build_flow_scope_declare(flow, &color[cbuf][chan]);
      }
   }
   lp_build_flow_scope_declare(flow, &z);

   /* do triangle edge testing */
   if (do_tri_test) {
      generate_tri_edge_mask(builder, i, pmask,
                             c0, c1, c2, step0_ptr, step1_ptr, step2_ptr);
   }
   else {
      *pmask = build_int32_vec_const(~0);
   }

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, flow, type, *pmask);

   if (key->scissor) {
      LLVMValueRef smask =
         generate_scissor_test(builder, context_ptr, interp, type);
      lp_build_mask_update(&mask, smask);
   }
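
   /* The depth/stencil test may only run before the shader when nothing
    * later can still change the outcome: no alpha test, no KIL/discard in
    * the shader, and no shader-written Z (see the condition below).
    */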
   early_depth_stencil_test =
      (key->depth.enabled || key->stencil[0].enabled) &&
      !key->alpha.enabled &&
      !shader->info.uses_kill &&
      !shader->info.writes_z;

   if (early_depth_stencil_test)
      generate_depth_stencil(builder, key,
                             type, &mask,
                             stencil_refs, z, depth_ptr, facing, counter);

   lp_build_tgsi_soa(builder, tokens, type, &mask,
                     consts_ptr, interp->pos, interp->inputs,
                     outputs, sampler, &shader->info);

   /* loop over fragment shader outputs/results */
   for (attrib = 0; attrib < shader->info.num_outputs; ++attrib) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         if(outputs[attrib][chan]) {
            LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
            lp_build_name(out, "output%u.%u.%c", i, attrib, "xyzw"[chan]);

            switch (shader->info.output_semantic_name[attrib]) {
            case TGSI_SEMANTIC_COLOR:
               {
                  unsigned cbuf = shader->info.output_semantic_index[attrib];

                  lp_build_name(out, "color%u.%u.%c", i, attrib, "rgba"[chan]);

                  /* XXX: should the alpha reference value be passed separately? */
                  /* XXX: should only test the final assignment to alpha */
                  if(cbuf == 0 && chan == 3) {
                     LLVMValueRef alpha = out;
                     LLVMValueRef alpha_ref_value;
                     alpha_ref_value = lp_jit_context_alpha_ref_value(builder, context_ptr);
                     alpha_ref_value = lp_build_broadcast(builder, vec_type, alpha_ref_value);
                     lp_build_alpha_test(builder, &key->alpha, type,
                                         &mask, alpha, alpha_ref_value);
                  }

                  color[cbuf][chan] = out;
                  break;
               }

            case TGSI_SEMANTIC_POSITION:
               if(chan == 2)
                  z = out;
               break;
            }
         }
      }
   }

   if (!early_depth_stencil_test)
      generate_depth_stencil(builder, key,
                             type, &mask,
                             stencil_refs, z, depth_ptr, facing, counter);

   lp_build_mask_end(&mask);

   lp_build_flow_scope_end(flow);

   lp_build_flow_destroy(flow);
}

/**
 * Generate color blending and color output.
 * \param rt  the render target index (to index blend, colormask state)
 * \param type  the pixel color type
 * \param context_ptr  pointer to the runtime JIT context
 * \param mask  execution mask (active fragment/pixel mask)
 * \param src  colors from the fragment shader
 * \param dst_ptr  the destination color buffer pointer
 */
static void
generate_blend(const struct pipe_blend_state *blend,
               unsigned rt,
               LLVMBuilderRef builder,
               struct lp_type type,
               LLVMValueRef context_ptr,
               LLVMValueRef mask,
               LLVMValueRef *src,
               LLVMValueRef dst_ptr)
{
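   /* Note: blending is done here in SoA form: one vector per channel
    * (R, G, B, A), each covering all 16 pixels of the block in the 8-bit
    * normalized blend type.
    */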
   struct lp_build_context bld;
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask_ctx;
   LLVMTypeRef vec_type;
   LLVMValueRef const_ptr;
   LLVMValueRef con[4];
   LLVMValueRef dst[4];
   LLVMValueRef res[4];
   unsigned chan;

   lp_build_context_init(&bld, builder, type);

   flow = lp_build_flow_create(builder);

   /* we'll use this mask context to skip blending if all pixels are dead */
   lp_build_mask_begin(&mask_ctx, flow, type, mask);

   vec_type = lp_build_vec_type(type);

   const_ptr = lp_jit_context_blend_color(builder, context_ptr);
   const_ptr = LLVMBuildBitCast(builder, const_ptr,
                                LLVMPointerType(vec_type, 0), "");

   /* load constant blend color and colors from the dest color buffer */
   for(chan = 0; chan < 4; ++chan) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
      con[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");

      dst[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");

      lp_build_name(con[chan], "con.%c", "rgba"[chan]);
      lp_build_name(dst[chan], "dst.%c", "rgba"[chan]);
   }

   lp_build_blend_soa(builder, blend, type, rt, src, dst, con, res);

   /* store results to color buffer */
   for(chan = 0; chan < 4; ++chan) {
      if(blend->rt[rt].colormask & (1 << chan)) {
         LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
         lp_build_name(res[chan], "res.%c", "rgba"[chan]);
         res[chan] = lp_build_select(&bld, mask, res[chan], dst[chan]);
         LLVMBuildStore(builder, res[chan], LLVMBuildGEP(builder, dst_ptr, &index, 1, ""));
      }
   }

   lp_build_mask_end(&mask_ctx);
   lp_build_flow_destroy(flow);
}

/**
 * Generate the runtime callable function for the whole fragment pipeline.
 * Note that the function which we generate operates on a block of 16
 * pixels at a time.  The block contains 2x2 quads.  Each quad contains
 * 2x2 pixels.
 */
static void
generate_fragment(struct llvmpipe_context *lp,
                  struct lp_fragment_shader *shader,
                  struct lp_fragment_shader_variant *variant,
                  unsigned do_tri_test)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   const struct lp_fragment_shader_variant_key *key = &variant->key;
   struct lp_type fs_type;
   struct lp_type blend_type;
   LLVMTypeRef fs_elem_type;
   LLVMTypeRef fs_int_vec_type;
   LLVMTypeRef blend_vec_type;
   LLVMTypeRef arg_types[16];
   LLVMTypeRef func_type;
   LLVMTypeRef int32_vec4_type = lp_build_int32_vec4_type();
   LLVMValueRef context_ptr;
   LLVMValueRef x;
   LLVMValueRef y;
   LLVMValueRef a0_ptr;
   LLVMValueRef dadx_ptr;
   LLVMValueRef dady_ptr;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef depth_ptr;
   LLVMValueRef c0, c1, c2, step0_ptr, step1_ptr, step2_ptr, counter = NULL;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_interp_soa_context interp;
   LLVMValueRef fs_mask[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][LP_MAX_VECTOR_LENGTH];
   LLVMValueRef blend_mask;
   LLVMValueRef function;
   LLVMValueRef facing;
   unsigned num_fs;
   unsigned i;
   unsigned chan;
   unsigned cbuf;

   /* TODO: actually pick these based on the fs and color buffer
    * characteristics. */

   memset(&fs_type, 0, sizeof fs_type);
   fs_type.floating = TRUE;  /* floating point values */
   fs_type.sign = TRUE;      /* values are signed */
   fs_type.norm = FALSE;     /* values are not limited to [0,1] or [-1,1] */
   fs_type.width = 32;       /* 32-bit float */
   fs_type.length = 4;       /* 4 elements per vector */
   num_fs = 4;               /* number of quads per block */

   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE; /* values are integers */
   blend_type.sign = FALSE;     /* values are unsigned */
   blend_type.norm = TRUE;      /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;        /* 8-bit ubyte values */
   blend_type.length = 16;      /* 16 elements per vector */

   /*
    * Generate the function prototype. Any change here must be reflected in
    * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
    */

   fs_elem_type = lp_build_elem_type(fs_type);
   fs_int_vec_type = lp_build_int_vec_type(fs_type);

   blend_vec_type = lp_build_vec_type(blend_type);

   arg_types[0] = screen->context_ptr_type;             /* context */
   arg_types[1] = LLVMInt32Type();                      /* x */
   arg_types[2] = LLVMInt32Type();                      /* y */
   arg_types[3] = LLVMFloatType();                      /* facing */
   arg_types[4] = LLVMPointerType(fs_elem_type, 0);     /* a0 */
   arg_types[5] = LLVMPointerType(fs_elem_type, 0);     /* dadx */
   arg_types[6] = LLVMPointerType(fs_elem_type, 0);     /* dady */
   arg_types[7] = LLVMPointerType(LLVMPointerType(blend_vec_type, 0), 0);  /* color */
   arg_types[8] = LLVMPointerType(fs_int_vec_type, 0);  /* depth */
   arg_types[9] = LLVMInt32Type();                      /* c0 */
   arg_types[10] = LLVMInt32Type();                     /* c1 */
   arg_types[11] = LLVMInt32Type();                     /* c2 */

   /* Note: the step arrays are built as int32[16] but we interpret
    * them here as int32_vec4[4].
    */
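   /* E.g. the four values of edge function 0 for quad i live in
    * step0[4*i .. 4*i+3], which generate_tri_edge_mask() loads as the
    * i-th int32_vec4 element of the step0 array.
    */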
   arg_types[12] = LLVMPointerType(int32_vec4_type, 0); /* step0 */
   arg_types[13] = LLVMPointerType(int32_vec4_type, 0); /* step1 */
   arg_types[14] = LLVMPointerType(int32_vec4_type, 0); /* step2 */
   arg_types[15] = LLVMPointerType(LLVMInt32Type(), 0); /* counter */

   func_type = LLVMFunctionType(LLVMVoidType(), arg_types, Elements(arg_types), 0);

   function = LLVMAddFunction(screen->module, "shader", func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   variant->function[do_tri_test] = function;

   /* XXX: need to propagate noalias down into color param now we are
    * passing a pointer-to-pointer?
    */
   for(i = 0; i < Elements(arg_types); ++i)
      if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
         LLVMAddAttribute(LLVMGetParam(function, i), LLVMNoAliasAttribute);

   context_ptr   = LLVMGetParam(function, 0);
   x             = LLVMGetParam(function, 1);
   y             = LLVMGetParam(function, 2);
   facing        = LLVMGetParam(function, 3);
   a0_ptr        = LLVMGetParam(function, 4);
   dadx_ptr      = LLVMGetParam(function, 5);
   dady_ptr      = LLVMGetParam(function, 6);
   color_ptr_ptr = LLVMGetParam(function, 7);
   depth_ptr     = LLVMGetParam(function, 8);
   c0            = LLVMGetParam(function, 9);
   c1            = LLVMGetParam(function, 10);
   c2            = LLVMGetParam(function, 11);
   step0_ptr     = LLVMGetParam(function, 12);
   step1_ptr     = LLVMGetParam(function, 13);
   step2_ptr     = LLVMGetParam(function, 14);

   lp_build_name(context_ptr, "context");
   lp_build_name(x, "x");
   lp_build_name(y, "y");
   lp_build_name(a0_ptr, "a0");
   lp_build_name(dadx_ptr, "dadx");
   lp_build_name(dady_ptr, "dady");
   lp_build_name(color_ptr_ptr, "color_ptr_ptr");
   lp_build_name(depth_ptr, "depth");
   lp_build_name(c0, "c0");
   lp_build_name(c1, "c1");
   lp_build_name(c2, "c2");
   lp_build_name(step0_ptr, "step0");
   lp_build_name(step1_ptr, "step1");
   lp_build_name(step2_ptr, "step2");

   if (key->occlusion_count) {
      counter = LLVMGetParam(function, 15);
      lp_build_name(counter, "counter");
   }

   block = LLVMAppendBasicBlock(function, "entry");
   builder = LLVMCreateBuilder();
   LLVMPositionBuilderAtEnd(builder, block);

   /*
    * The shader input interpolation info is not explicitly baked in the
    * shader key, but everything it derives from (TGSI, and flatshade) is
    * already included in the shader key.
    */
   lp_build_interp_soa_init(&interp,
                            shader->base.tokens,
                            key->flatshade,
                            builder, fs_type,
                            a0_ptr, dadx_ptr, dady_ptr,
                            x, y);

   /* code generated texture sampling */
   sampler = lp_llvm_sampler_soa_create(key->sampler, context_ptr);

   /* loop over quads in the block */
   for(i = 0; i < num_fs; ++i) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
      LLVMValueRef out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS];
      LLVMValueRef depth_ptr_i;

      lp_build_interp_soa_update(&interp, i);

      depth_ptr_i = LLVMBuildGEP(builder, depth_ptr, &index, 1, "");

      generate_fs(lp, shader, key,
                  builder,
                  fs_type,
                  context_ptr,
                  i,
                  &interp,
                  sampler,
                  &fs_mask[i], /* output */
                  out_color,
                  depth_ptr_i,
                  facing,
                  do_tri_test,
                  c0, c1, c2,
                  step0_ptr, step1_ptr, step2_ptr, counter);

      for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++)
         for(chan = 0; chan < NUM_CHANNELS; ++chan)
            fs_out_color[cbuf][chan][i] = out_color[cbuf][chan];
   }

   sampler->destroy(sampler);

   /* Loop over color outputs / color buffers to do blending.
    */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      LLVMValueRef color_ptr;
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), cbuf, 0);
      LLVMValueRef blend_in_color[NUM_CHANNELS];
      unsigned rt;

      /*
       * Convert the fs's output color and mask to fit to the blending type.
       */
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         lp_build_conv(builder, fs_type, blend_type,
                       fs_out_color[cbuf][chan], num_fs,
                       &blend_in_color[chan], 1);
         lp_build_name(blend_in_color[chan], "color%d.%c", cbuf, "rgba"[chan]);
      }

      lp_build_conv_mask(builder, fs_type, blend_type,
                         fs_mask, num_fs,
                         &blend_mask, 1);

      color_ptr = LLVMBuildLoad(builder,
                                LLVMBuildGEP(builder, color_ptr_ptr, &index, 1, ""),
                                "");
      lp_build_name(color_ptr, "color_ptr%d", cbuf);

      /* which blend/colormask state to use */
      rt = key->blend.independent_blend_enable ? cbuf : 0;

      generate_blend(&key->blend,
                     rt,
                     builder,
                     blend_type,
                     context_ptr,
                     blend_mask,
                     blend_in_color,
                     color_ptr);
   }

   LLVMBuildRetVoid(builder);

   LLVMDisposeBuilder(builder);

   /* Verify the LLVM IR.  If invalid, dump and abort */
   if(LLVMVerifyFunction(function, LLVMPrintMessageAction)) {
      lp_debug_dump_value(function);
      abort();
   }

   /* Apply optimizations to LLVM IR */
   LLVMRunFunctionPassManager(screen->pass, function);

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      /* Print the LLVM IR to stderr */
      lp_debug_dump_value(function);
   }

   /*
    * Translate the LLVM IR into machine code.
    */
   {
      void *f = LLVMGetPointerToGlobal(screen->engine, function);

      variant->jit_function[do_tri_test] = (lp_jit_frag_func)pointer_to_func(f);

      if (gallivm_debug & GALLIVM_DEBUG_ASM) {
         lp_disassemble(f);
      }
   }
}

static void
dump_fs_variant_key(const struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   debug_printf("fs variant %p:\n", (void *) key);

   if (key->depth.enabled) {
      debug_printf("depth.format = %s\n", util_format_name(key->zsbuf_format));
      debug_printf("depth.func = %s\n", util_dump_func(key->depth.func, TRUE));
      debug_printf("depth.writemask = %u\n", key->depth.writemask);
   }

   for (i = 0; i < 2; ++i) {
      if (key->stencil[i].enabled) {
         debug_printf("stencil[%u].func = %s\n", i, util_dump_func(key->stencil[i].func, TRUE));
         debug_printf("stencil[%u].fail_op = %s\n", i, util_dump_stencil_op(key->stencil[i].fail_op, TRUE));
         debug_printf("stencil[%u].zpass_op = %s\n", i, util_dump_stencil_op(key->stencil[i].zpass_op, TRUE));
         debug_printf("stencil[%u].zfail_op = %s\n", i, util_dump_stencil_op(key->stencil[i].zfail_op, TRUE));
         debug_printf("stencil[%u].valuemask = 0x%x\n", i, key->stencil[i].valuemask);
         debug_printf("stencil[%u].writemask = 0x%x\n", i, key->stencil[i].writemask);
      }
   }

   if (key->alpha.enabled) {
      debug_printf("alpha.func = %s\n", util_dump_func(key->alpha.func, TRUE));
      debug_printf("alpha.ref_value = %f\n", key->alpha.ref_value);
   }

   if (key->blend.logicop_enable) {
      debug_printf("blend.logicop_func = %s\n", util_dump_logicop(key->blend.logicop_func, TRUE));
   }
   else if (key->blend.rt[0].blend_enable) {
      debug_printf("blend.rgb_func = %s\n",         util_dump_blend_func  (key->blend.rt[0].rgb_func, TRUE));
      debug_printf("blend.rgb_src_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
      debug_printf("blend.rgb_dst_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
      debug_printf("blend.alpha_func = %s\n",       util_dump_blend_func  (key->blend.rt[0].alpha_func, TRUE));
      debug_printf("blend.alpha_src_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
      debug_printf("blend.alpha_dst_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
   }
   debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);

   for (i = 0; i < PIPE_MAX_SAMPLERS; ++i) {
      if (key->sampler[i].format) {
         debug_printf("sampler[%u] = \n", i);
         debug_printf("  .format = %s\n",
                      util_format_name(key->sampler[i].format));
         debug_printf("  .target = %s\n",
                      util_dump_tex_target(key->sampler[i].target, TRUE));
         debug_printf("  .pot = %u %u %u\n",
                      key->sampler[i].pot_width,
                      key->sampler[i].pot_height,
                      key->sampler[i].pot_depth);
         debug_printf("  .wrap = %s %s %s\n",
                      util_dump_tex_wrap(key->sampler[i].wrap_s, TRUE),
                      util_dump_tex_wrap(key->sampler[i].wrap_t, TRUE),
                      util_dump_tex_wrap(key->sampler[i].wrap_r, TRUE));
         debug_printf("  .min_img_filter = %s\n",
                      util_dump_tex_filter(key->sampler[i].min_img_filter, TRUE));
         debug_printf("  .min_mip_filter = %s\n",
                      util_dump_tex_mipfilter(key->sampler[i].min_mip_filter, TRUE));
         debug_printf("  .mag_img_filter = %s\n",
                      util_dump_tex_filter(key->sampler[i].mag_img_filter, TRUE));
         if (key->sampler[i].compare_mode != PIPE_TEX_COMPARE_NONE)
            debug_printf("  .compare_func = %s\n", util_dump_func(key->sampler[i].compare_func, TRUE));
         debug_printf("  .normalized_coords = %u\n", key->sampler[i].normalized_coords);
      }
   }
}

static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key)
{
   struct lp_fragment_shader_variant *variant;

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      tgsi_dump(shader->base.tokens, 0);
      dump_fs_variant_key(key);
   }

   variant = CALLOC_STRUCT(lp_fragment_shader_variant);
   if (!variant)
      return NULL;

   memcpy(&variant->key, key, sizeof *key);

   generate_fragment(lp, shader, variant, RAST_WHOLE);
   generate_fragment(lp, shader, variant, RAST_EDGE_TEST);

   /* TODO: most of these can be relaxed, in particular the colormask */
   variant->opaque =
         !key->blend.logicop_enable &&
         !key->blend.rt[0].blend_enable &&
         key->blend.rt[0].colormask == 0xf &&
         !key->stencil[0].enabled &&
         !key->alpha.enabled &&
         !key->depth.enabled &&
         !shader->info.uses_kill
         ? TRUE : FALSE;

   /* insert new variant into linked list */
   variant->next = shader->variants;
   shader->variants = variant;

   return variant;
}

static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct lp_fragment_shader *shader;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   /* get/save the summary info for this shader */
   tgsi_scan_shader(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   if (LP_DEBUG & DEBUG_TGSI) {
      unsigned attrib;
      debug_printf("llvmpipe: Create fragment shader %p:\n", (void *) shader);
      tgsi_dump(templ->tokens, 0);
      debug_printf("usage masks:\n");
      for (attrib = 0; attrib < shader->info.num_inputs; ++attrib) {
         unsigned usage_mask = shader->info.input_usage_mask[attrib];
         debug_printf("  IN[%u].%s%s%s%s\n",
                      attrib,
                      usage_mask & TGSI_WRITEMASK_X ? "x" : "",
                      usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
                      usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
                      usage_mask & TGSI_WRITEMASK_W ? "w" : "");
      }
   }

   return shader;
}

static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->fs == fs)
      return;

   draw_flush(llvmpipe->draw);

   llvmpipe->fs = fs;

   llvmpipe->dirty |= LP_NEW_FS;
}

static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_fragment_shader *shader = fs;
   struct lp_fragment_shader_variant *variant;

   assert(fs != llvmpipe->fs);

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders as they may still be binned
    */
   draw_flush(llvmpipe->draw);
   lp_setup_flush(llvmpipe->setup, 0);

   variant = shader->variants;
   while (variant) {
      struct lp_fragment_shader_variant *next = variant->next;
      unsigned i;

      for (i = 0; i < Elements(variant->function); i++) {
         if (variant->function[i]) {
            if (variant->jit_function[i])
               LLVMFreeMachineCodeForFunction(screen->engine,
                                              variant->function[i]);
            LLVMDeleteFunction(variant->function[i]);
         }
      }

      FREE(variant);

      variant = next;
   }

   FREE((void *) shader->base.tokens);
   FREE(shader);
}

static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             struct pipe_resource *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned size = constants ? constants->width0 : 0;
   const void *data = constants ? llvmpipe_resource_data(constants) : NULL;

   assert(shader < PIPE_SHADER_TYPES);

   if(llvmpipe->constants[shader] == constants)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_resource_reference(&llvmpipe->constants[shader], constants);

   if(shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, PIPE_SHADER_VERTEX, 0,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}

/**
 * Return the blend factor equivalent to a destination alpha of one.
 */
static INLINE unsigned
force_dst_alpha_one(unsigned factor, boolean alpha)
{
   switch(factor) {
   case PIPE_BLENDFACTOR_DST_ALPHA:
      return PIPE_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_INV_DST_ALPHA:
      return PIPE_BLENDFACTOR_ZERO;
   case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
      return PIPE_BLENDFACTOR_ZERO;
   }

   if (alpha)
      return factor;

   switch(factor) {
   case PIPE_BLENDFACTOR_DST_COLOR:
      return PIPE_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_INV_DST_COLOR:
      return PIPE_BLENDFACTOR_ZERO;
   }

   return factor;
}

/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   memset(key, 0, sizeof *key);

   if (lp->framebuffer.zsbuf) {
      if (lp->depth_stencil->depth.enabled) {
         key->zsbuf_format = lp->framebuffer.zsbuf->format;
         memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
      }
      if (lp->depth_stencil->stencil[0].enabled) {
         key->zsbuf_format = lp->framebuffer.zsbuf->format;
         memcpy(&key->stencil, &lp->depth_stencil->stencil, sizeof key->stencil);
      }
   }

   key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   if(key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;
   key->scissor = lp->rasterizer->scissor;
   if (lp->active_query_count) {
      key->occlusion_count = TRUE;
   }

   if (lp->framebuffer.nr_cbufs) {
      memcpy(&key->blend, lp->blend, sizeof key->blend);
   }

   key->nr_cbufs = lp->framebuffer.nr_cbufs;
   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];
      const struct util_format_description *format_desc;
      unsigned chan;

      format_desc = util_format_description(lp->framebuffer.cbufs[i]->format);
      assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
             format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

      blend_rt->colormask = lp->blend->rt[i].colormask;

      /* mask out color channels not present in the color buffer.
       * Should be simple to incorporate per-cbuf writemasks:
       */
      for(chan = 0; chan < 4; ++chan) {
         enum util_format_swizzle swizzle = format_desc->swizzle[chan];

         if(swizzle > UTIL_FORMAT_SWIZZLE_W)
            blend_rt->colormask &= ~(1 << chan);
      }

      /*
       * Our swizzled render tiles always have an alpha channel, but the
       * linear render target format often does not, so force the dst alpha
       * to be one here.
       *
       * This is not a mere optimization. Wrong results will be produced if
       * the dst alpha is used, the dst format does not have alpha, and the
       * previous rendering was not flushed from the swizzled to linear
       * buffer. For example, NonPowTwo DCT.
       *
       * TODO: This should be generalized to all channels for better
       * performance, but only alpha causes correctness issues.
       */
      if (format_desc->swizzle[3] > UTIL_FORMAT_SWIZZLE_W) {
         blend_rt->rgb_src_factor   = force_dst_alpha_one(blend_rt->rgb_src_factor, FALSE);
         blend_rt->rgb_dst_factor   = force_dst_alpha_one(blend_rt->rgb_dst_factor, FALSE);
         blend_rt->alpha_src_factor = force_dst_alpha_one(blend_rt->alpha_src_factor, TRUE);
         blend_rt->alpha_dst_factor = force_dst_alpha_one(blend_rt->alpha_dst_factor, TRUE);
      }
   }

   for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
      if(shader->info.file_mask[TGSI_FILE_SAMPLER] & (1 << i))
         lp_sampler_static_state(&key->sampler[i], lp->fragment_sampler_views[i], lp->sampler[i]);
}

/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant;

   make_variant_key(lp, shader, &key);

   variant = shader->variants;
   while (variant) {
      if(memcmp(&variant->key, &key, sizeof key) == 0)
         break;

      variant = variant->next;
   }

   if (!variant) {
      int64_t t0, t1;
      int64_t dt;

      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */
   }

   lp_setup_set_fs_variant(lp->setup, variant);
}

void
llvmpipe_init_fs_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
   llvmpipe->pipe.bind_fs_state = llvmpipe_bind_fs_state;
   llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;

   llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
}