/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Code generation for the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 * - triangle edge in/out testing
 * - scissor test
 * - fragment shader
 * - alpha test
 * - depth/stencil test
 * - blending
 *
 * This file has only the glue to assemble the fragment pipeline.  The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, and in a completely generic and reusable way.  Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different data types and precisions.  For example, the fragment
 * shader typically needs to be done in floats, but the depth/stencil test and
 * blending are better done in the type that most closely matches the
 * depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply a different number of elements, so we
 * must code-generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
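
/*
 * For example, with the types chosen in generate_fragment() below, the
 * fragment shader stage runs on 4 x float32 vectors (one 2x2 quad of pixels
 * per vector) while blending runs on 16 x unorm8 vectors.  Both are 128 bits
 * wide, so the four float vectors produced per color channel have to be
 * converted and packed by lp_build_conv() into a single vector before the
 * blend stage can consume them.
 */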
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pointer.h"
#include "util/u_format.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "os/os_time.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_conv.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"

#include "lp_bld_alpha.h"
#include "lp_bld_blend.h"
#include "lp_bld_depth.h"
#include "lp_bld_interp.h"
#include "lp_context.h"
#include "lp_screen.h"
#include "lp_tex_sample.h"

#include <llvm-c/Analysis.h>


static unsigned fs_no = 0;

/**
 * Generate the depth/stencil test code.
 */
static void
generate_depth_stencil(LLVMBuilderRef builder,
                       const struct lp_fragment_shader_variant_key *key,
                       struct lp_type src_type,
                       struct lp_build_mask_context *mask,
                       LLVMValueRef stencil_refs[2],
                       LLVMValueRef src,
                       LLVMValueRef dst_ptr,
                       LLVMValueRef facing,
                       LLVMValueRef counter)
{
   const struct util_format_description *format_desc;
   struct lp_type dst_type;

   if (!key->depth.enabled && !key->stencil[0].enabled && !key->stencil[1].enabled)
      return;

   format_desc = util_format_description(key->zsbuf_format);

   /*
    * Depths are expected to be between 0 and 1, even if they are stored in
    * floats. Setting these bits here will ensure that the lp_build_conv() call
    * below won't try to unnecessarily clamp the incoming values.
    */
   if (src_type.floating) {
      src_type.sign = FALSE;
      src_type.norm = TRUE;
   }
   else {
      assert(!src_type.sign);
      assert(src_type.norm);
   }

   /* Pick the depth type. */
   dst_type = lp_depth_type(format_desc, src_type.width * src_type.length);

   /* FIXME: Cope with a depth test type with a different bit width. */
   assert(dst_type.width == src_type.width);
   assert(dst_type.length == src_type.length);

   /* Convert fragment Z from float to integer */
   lp_build_conv(builder, src_type, dst_type, &src, 1, &src, 1);

   dst_ptr = LLVMBuildBitCast(builder,
                              dst_ptr,
                              LLVMPointerType(lp_build_vec_type(dst_type), 0), "");
   lp_build_depth_stencil_test(builder,

/**
 * Generate the code to do inside/outside triangle testing for the
 * four pixels in a 2x2 quad.  This will set the four elements of the
 * quad mask vector to 0 or ~0.
 * \param i  which quad of the quad group to test, in [0,3]
 */
static void
generate_tri_edge_mask(LLVMBuilderRef builder,
                       unsigned i,
                       LLVMValueRef *mask,      /* ivec4, out */
                       LLVMValueRef c0,         /* int32 */
                       LLVMValueRef c1,         /* int32 */
                       LLVMValueRef c2,         /* int32 */
                       LLVMValueRef step0_ptr,  /* ivec4 */
                       LLVMValueRef step1_ptr,  /* ivec4 */
                       LLVMValueRef step2_ptr)  /* ivec4 */
{
#define OPTIMIZE_IN_OUT_TEST 0
#if OPTIMIZE_IN_OUT_TEST
   struct lp_build_if_state ifctx;
   LLVMValueRef not_draw_all;
#endif
   struct lp_build_flow_context *flow;
   struct lp_type i32_type;
   LLVMTypeRef i32vec4_type;
   LLVMValueRef c0_vec, c1_vec, c2_vec;
   LLVMValueRef in_out_mask;

   /* int32 vector type */
   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */

   i32vec4_type = lp_build_int32_vec4_type();
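
   /*
    * The test works on the rasterizer's edge functions: each stepN_ptr[i]
    * vector holds per-pixel edge-function terms for quad i, and a pixel is
    * inside edge N when its term is greater than the per-triangle constant
    * cN, so ANDing the three comparisons yields the in-triangle mask.  A c0
    * value of INT_MIN apparently marks a block that is known to be entirely
    * inside the triangle, which the optional OPTIMIZE_IN_OUT_TEST path below
    * uses to skip the detailed per-pixel test.
    */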
   /*
    * Use a conditional here to do detailed pixel in/out testing.
    * We only have to do this if c0 != INT_MIN.
    */
   flow = lp_build_flow_create(builder);
   lp_build_flow_scope_begin(flow);

#if OPTIMIZE_IN_OUT_TEST
   /* not_draw_all = (c0 != INT_MIN) */
   not_draw_all = LLVMBuildICmp(builder,
                                LLVMIntNE,
                                c0,
                                LLVMConstInt(LLVMInt32Type(), INT_MIN, 0),
                                "");

   in_out_mask = lp_build_const_int_vec(i32_type, ~0);

   lp_build_flow_scope_declare(flow, &in_out_mask);

   /* if (not_draw_all) {... */
   lp_build_if(&ifctx, flow, builder, not_draw_all);
#endif
   {
      LLVMValueRef step0_vec, step1_vec, step2_vec;
      LLVMValueRef m0_vec, m1_vec, m2_vec;
      LLVMValueRef index, m;

      /* c0_vec = {c0, c0, c0, c0}
       * Note that we emit this code four times but LLVM optimizes away
       * three instances of it.
       */
      c0_vec = lp_build_broadcast(builder, i32vec4_type, c0);
      c1_vec = lp_build_broadcast(builder, i32vec4_type, c1);
      c2_vec = lp_build_broadcast(builder, i32vec4_type, c2);
      lp_build_name(c0_vec, "edgeconst0vec");
      lp_build_name(c1_vec, "edgeconst1vec");
      lp_build_name(c2_vec, "edgeconst2vec");

      /* load step0_vec, step1_vec, step2_vec from memory */
      index = LLVMConstInt(LLVMInt32Type(), i, 0);
      step0_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step0_ptr, &index, 1, ""), "");
      step1_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step1_ptr, &index, 1, ""), "");
      step2_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step2_ptr, &index, 1, ""), "");
      lp_build_name(step0_vec, "step0vec");
      lp_build_name(step1_vec, "step1vec");
      lp_build_name(step2_vec, "step2vec");

      /* m0_vec = step0_ptr[i] > c0_vec */
      m0_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step0_vec, c0_vec);
      m1_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step1_vec, c1_vec);
      m2_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step2_vec, c2_vec);

      /* in_out_mask = m0_vec & m1_vec & m2_vec */
      m = LLVMBuildAnd(builder, m0_vec, m1_vec, "");
      in_out_mask = LLVMBuildAnd(builder, m, m2_vec, "");
      lp_build_name(in_out_mask, "inoutmaskvec");
   }

#if OPTIMIZE_IN_OUT_TEST
   lp_build_endif(&ifctx);
#endif

   lp_build_flow_scope_end(flow);
   lp_build_flow_destroy(flow);

   /* This is the initial alive/dead pixel mask for a quad of four pixels.
    * It's an int[4] vector with each word set to 0 or ~0.
    * Words will get cleared when pixels fail the Z test, etc.
    */
   *mask = in_out_mask;
}

static LLVMValueRef
generate_scissor_test(LLVMBuilderRef builder,
                      LLVMValueRef context_ptr,
                      const struct lp_build_interp_soa_context *interp,
                      struct lp_type type)
{
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef xpos = interp->pos[0], ypos = interp->pos[1];
   LLVMValueRef xmin, ymin, xmax, ymax;
   LLVMValueRef m0, m1, m2, m3, m;

   /* xpos, ypos contain the window coords for the four pixels in the quad */

   /* get the current scissor bounds, convert to vectors */
   xmin = lp_jit_context_scissor_xmin_value(builder, context_ptr);
   xmin = lp_build_broadcast(builder, vec_type, xmin);

   ymin = lp_jit_context_scissor_ymin_value(builder, context_ptr);
   ymin = lp_build_broadcast(builder, vec_type, ymin);

   xmax = lp_jit_context_scissor_xmax_value(builder, context_ptr);
   xmax = lp_build_broadcast(builder, vec_type, xmax);

   ymax = lp_jit_context_scissor_ymax_value(builder, context_ptr);
   ymax = lp_build_broadcast(builder, vec_type, ymax);

   /* compare the fragment's position coordinates against the scissor bounds */
   m0 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, xpos, xmin);
   m1 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, ypos, ymin);
   m2 = lp_build_compare(builder, type, PIPE_FUNC_LESS, xpos, xmax);
   m3 = lp_build_compare(builder, type, PIPE_FUNC_LESS, ypos, ymax);

   /* AND all the masks together */
   m = LLVMBuildAnd(builder, m0, m1, "");
   m = LLVMBuildAnd(builder, m, m2, "");
   m = LLVMBuildAnd(builder, m, m3, "");

   lp_build_name(m, "scissormask");

   return m;
}

static LLVMValueRef
build_int32_vec_const(int value)
{
   struct lp_type i32_type;

   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */
   return lp_build_const_int_vec(i32_type, value);
}

/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 * \param i  which quad in the tile, in range [0,3]
 * \param do_tri_test  if 1, do triangle edge in/out testing
 */
static void
generate_fs(struct llvmpipe_context *lp,
            struct lp_fragment_shader *shader,
            const struct lp_fragment_shader_variant_key *key,
            LLVMBuilderRef builder,
            struct lp_type type,
            LLVMValueRef context_ptr,
            unsigned i,
            const struct lp_build_interp_soa_context *interp,
            struct lp_build_sampler_soa *sampler,
            LLVMValueRef *pmask,
            LLVMValueRef (*color)[4],
            LLVMValueRef depth_ptr,
            LLVMValueRef facing,
            unsigned do_tri_test,
            LLVMValueRef c0,
            LLVMValueRef c1,
            LLVMValueRef c2,
            LLVMValueRef step0_ptr,
            LLVMValueRef step1_ptr,
            LLVMValueRef step2_ptr,
            LLVMValueRef counter)
{
   const struct tgsi_token *tokens = shader->base.tokens;
   LLVMTypeRef vec_type;
   LLVMValueRef consts_ptr;
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][NUM_CHANNELS];
   LLVMValueRef z = interp->pos[2];
   LLVMValueRef stencil_refs[2];
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask;
   boolean early_depth_stencil_test;
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;

   stencil_refs[0] = lp_jit_context_stencil_ref_front_value(builder, context_ptr);
   stencil_refs[1] = lp_jit_context_stencil_ref_back_value(builder, context_ptr);

   vec_type = lp_build_vec_type(type);

   consts_ptr = lp_jit_context_constants(builder, context_ptr);

   flow = lp_build_flow_create(builder);

   memset(outputs, 0, sizeof outputs);

   lp_build_flow_scope_begin(flow);

   /* Declare the color and z variables */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         color[cbuf][chan] = LLVMGetUndef(vec_type);
         lp_build_flow_scope_declare(flow, &color[cbuf][chan]);
      }
   }
   lp_build_flow_scope_declare(flow, &z);

   /* do triangle edge testing */
   if (do_tri_test) {
      generate_tri_edge_mask(builder, i, pmask,
                             c0, c1, c2, step0_ptr, step1_ptr, step2_ptr);
   }
   else {
      *pmask = build_int32_vec_const(~0);
   }

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, flow, type, *pmask);

   if (key->scissor) {
      LLVMValueRef smask =
         generate_scissor_test(builder, context_ptr, interp, type);
      lp_build_mask_update(&mask, smask);
   }
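
   /*
    * The depth/stencil test may run before the shader only when later stages
    * cannot invalidate its result: no alpha test and no KIL instruction that
    * could still discard fragments, and no shader-written Z that could change
    * the depth value being tested.
    */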
   early_depth_stencil_test =
      (key->depth.enabled || key->stencil[0].enabled) &&
      !key->alpha.enabled &&
      !shader->info.uses_kill &&
      !shader->info.writes_z;

   if (early_depth_stencil_test)
      generate_depth_stencil(builder, key,
                             type, &mask,
                             stencil_refs, z, depth_ptr, facing, counter);

   lp_build_tgsi_soa(builder, tokens, type, &mask,
                     consts_ptr, interp->pos, interp->inputs,
                     outputs, sampler, &shader->info);

   /* loop over fragment shader outputs/results */
   for (attrib = 0; attrib < shader->info.num_outputs; ++attrib) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         if(outputs[attrib][chan]) {
            LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
            lp_build_name(out, "output%u.%u.%c", i, attrib, "xyzw"[chan]);

            switch (shader->info.output_semantic_name[attrib]) {
            case TGSI_SEMANTIC_COLOR:
               {
                  unsigned cbuf = shader->info.output_semantic_index[attrib];

                  lp_build_name(out, "color%u.%u.%c", i, attrib, "rgba"[chan]);

                  /* XXX: should the alpha reference value be passed separately? */
                  /* XXX: should only test the final assignment to alpha */
                  if(cbuf == 0 && chan == 3) {
                     LLVMValueRef alpha = out;
                     LLVMValueRef alpha_ref_value;
                     alpha_ref_value = lp_jit_context_alpha_ref_value(builder, context_ptr);
                     alpha_ref_value = lp_build_broadcast(builder, vec_type, alpha_ref_value);
                     lp_build_alpha_test(builder, &key->alpha, type,
                                         &mask, alpha, alpha_ref_value);
                  }

                  color[cbuf][chan] = out;
                  break;
               }

            case TGSI_SEMANTIC_POSITION:
               if(chan == 2)
                  z = out;
               break;
            }
         }
      }
   }

   if (!early_depth_stencil_test)
      generate_depth_stencil(builder, key,
                             type, &mask,
                             stencil_refs, z, depth_ptr, facing, counter);

   lp_build_mask_end(&mask);

   lp_build_flow_scope_end(flow);

   lp_build_flow_destroy(flow);
}

/**
 * Generate color blending and color output.
 * \param rt  the render target index (to index blend, colormask state)
 * \param type  the pixel color type
 * \param context_ptr  pointer to the runtime JIT context
 * \param mask  execution mask (active fragment/pixel mask)
 * \param src  colors from the fragment shader
 * \param dst_ptr  the destination color buffer pointer
 */
static void
generate_blend(const struct pipe_blend_state *blend,
               unsigned rt,
               LLVMBuilderRef builder,
               struct lp_type type,
               LLVMValueRef context_ptr,
               LLVMValueRef mask,
               LLVMValueRef *src,
               LLVMValueRef dst_ptr)
{
   struct lp_build_context bld;
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask_ctx;
   LLVMTypeRef vec_type;
   LLVMValueRef const_ptr;
   LLVMValueRef con[4];
   LLVMValueRef dst[4];
   LLVMValueRef res[4];
   unsigned chan;

   lp_build_context_init(&bld, builder, type);

   flow = lp_build_flow_create(builder);

   /* we'll use this mask context to skip blending if all pixels are dead */
   lp_build_mask_begin(&mask_ctx, flow, type, mask);

   vec_type = lp_build_vec_type(type);

   const_ptr = lp_jit_context_blend_color(builder, context_ptr);
   const_ptr = LLVMBuildBitCast(builder, const_ptr,
                                LLVMPointerType(vec_type, 0), "");

   /* load constant blend color and colors from the dest color buffer */
   for(chan = 0; chan < 4; ++chan) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
      con[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");

      dst[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");

      lp_build_name(con[chan], "con.%c", "rgba"[chan]);
      lp_build_name(dst[chan], "dst.%c", "rgba"[chan]);
   }

   lp_build_blend_soa(builder, blend, type, rt, src, dst, con, res);

   /* store results to color buffer */
   for(chan = 0; chan < 4; ++chan) {
      if(blend->rt[rt].colormask & (1 << chan)) {
         LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
         lp_build_name(res[chan], "res.%c", "rgba"[chan]);
         res[chan] = lp_build_select(&bld, mask, res[chan], dst[chan]);
         LLVMBuildStore(builder, res[chan], LLVMBuildGEP(builder, dst_ptr, &index, 1, ""));
      }
   }

   lp_build_mask_end(&mask_ctx);
   lp_build_flow_destroy(flow);
}

/**
 * Generate the runtime callable function for the whole fragment pipeline.
 * Note that the function which we generate operates on a block of 16
 * pixels at a time.  The block contains 2x2 quads.  Each quad contains
 * 2x2 pixels.
 */
static void
generate_fragment(struct llvmpipe_context *lp,
                  struct lp_fragment_shader *shader,
                  struct lp_fragment_shader_variant *variant,
                  unsigned do_tri_test)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   const struct lp_fragment_shader_variant_key *key = &variant->key;
   struct lp_type fs_type;
   struct lp_type blend_type;
   LLVMTypeRef fs_elem_type;
   LLVMTypeRef fs_int_vec_type;
   LLVMTypeRef blend_vec_type;
   LLVMTypeRef arg_types[16];
   LLVMTypeRef func_type;
   LLVMTypeRef int32_vec4_type = lp_build_int32_vec4_type();
   LLVMValueRef context_ptr;
   LLVMValueRef x;
   LLVMValueRef y;
   LLVMValueRef facing;
   LLVMValueRef a0_ptr;
   LLVMValueRef dadx_ptr;
   LLVMValueRef dady_ptr;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef depth_ptr;
   LLVMValueRef c0, c1, c2, step0_ptr, step1_ptr, step2_ptr, counter = NULL;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_interp_soa_context interp;
   LLVMValueRef fs_mask[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][LP_MAX_VECTOR_LENGTH];
   LLVMValueRef blend_mask;
   LLVMValueRef function;
   char func_name[256];
   unsigned num_fs;
   unsigned i;
   unsigned chan;
   unsigned cbuf;
   unsigned rt;

   /* TODO: actually pick these based on the fs and color buffer
    * characteristics. */

   memset(&fs_type, 0, sizeof fs_type);
   fs_type.floating = TRUE; /* floating point values */
   fs_type.sign = TRUE;     /* values are signed */
   fs_type.norm = FALSE;    /* values are not limited to [0,1] or [-1,1] */
   fs_type.width = 32;      /* 32-bit float */
   fs_type.length = 4;      /* 4 elements per vector */
   num_fs = 4;              /* number of quads per block */

   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE; /* values are integers */
   blend_type.sign = FALSE;     /* values are unsigned */
   blend_type.norm = TRUE;      /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;        /* 8-bit ubyte values */
   blend_type.length = 16;      /* 16 elements per vector */
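
   /*
    * With these settings one blend-stage vector holds a single 8-bit color
    * channel for all 16 pixels of the block, so the four float vectors the
    * shader stage produces per channel are packed into one such vector with
    * lp_build_conv() before generate_blend() is called below.
    */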
   /*
    * Generate the function prototype. Any change here must be reflected in
    * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
    */

   fs_elem_type = lp_build_elem_type(fs_type);
   fs_int_vec_type = lp_build_int_vec_type(fs_type);

   blend_vec_type = lp_build_vec_type(blend_type);

   util_snprintf(func_name, sizeof(func_name), "fs%u_variant%u_%s",
                 shader->no, variant->no, do_tri_test ? "edge" : "whole");

   arg_types[0] = screen->context_ptr_type;            /* context */
   arg_types[1] = LLVMInt32Type();                     /* x */
   arg_types[2] = LLVMInt32Type();                     /* y */
   arg_types[3] = LLVMFloatType();                     /* facing */
   arg_types[4] = LLVMPointerType(fs_elem_type, 0);    /* a0 */
   arg_types[5] = LLVMPointerType(fs_elem_type, 0);    /* dadx */
   arg_types[6] = LLVMPointerType(fs_elem_type, 0);    /* dady */
   arg_types[7] = LLVMPointerType(LLVMPointerType(blend_vec_type, 0), 0);  /* color */
   arg_types[8] = LLVMPointerType(fs_int_vec_type, 0); /* depth */
   arg_types[9] = LLVMInt32Type();                     /* c0 */
   arg_types[10] = LLVMInt32Type();                    /* c1 */
   arg_types[11] = LLVMInt32Type();                    /* c2 */
   /* Note: the step arrays are built as int32[16] but we interpret
    * them here as int32_vec4[4].
    */
   arg_types[12] = LLVMPointerType(int32_vec4_type, 0);/* step0 */
   arg_types[13] = LLVMPointerType(int32_vec4_type, 0);/* step1 */
   arg_types[14] = LLVMPointerType(int32_vec4_type, 0);/* step2 */
   arg_types[15] = LLVMPointerType(LLVMInt32Type(), 0);/* counter */

   func_type = LLVMFunctionType(LLVMVoidType(), arg_types, Elements(arg_types), 0);

   function = LLVMAddFunction(screen->module, func_name, func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   variant->function[do_tri_test] = function;

   /* XXX: need to propagate noalias down into color param now we are
    * passing a pointer-to-pointer?
    */
   for(i = 0; i < Elements(arg_types); ++i)
      if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
         LLVMAddAttribute(LLVMGetParam(function, i), LLVMNoAliasAttribute);

   context_ptr   = LLVMGetParam(function, 0);
   x             = LLVMGetParam(function, 1);
   y             = LLVMGetParam(function, 2);
   facing        = LLVMGetParam(function, 3);
   a0_ptr        = LLVMGetParam(function, 4);
   dadx_ptr      = LLVMGetParam(function, 5);
   dady_ptr      = LLVMGetParam(function, 6);
   color_ptr_ptr = LLVMGetParam(function, 7);
   depth_ptr     = LLVMGetParam(function, 8);
   c0            = LLVMGetParam(function, 9);
   c1            = LLVMGetParam(function, 10);
   c2            = LLVMGetParam(function, 11);
   step0_ptr     = LLVMGetParam(function, 12);
   step1_ptr     = LLVMGetParam(function, 13);
   step2_ptr     = LLVMGetParam(function, 14);

   lp_build_name(context_ptr, "context");
   lp_build_name(x, "x");
   lp_build_name(y, "y");
   lp_build_name(a0_ptr, "a0");
   lp_build_name(dadx_ptr, "dadx");
   lp_build_name(dady_ptr, "dady");
   lp_build_name(color_ptr_ptr, "color_ptr_ptr");
   lp_build_name(depth_ptr, "depth");
   lp_build_name(c0, "c0");
   lp_build_name(c1, "c1");
   lp_build_name(c2, "c2");
   lp_build_name(step0_ptr, "step0");
   lp_build_name(step1_ptr, "step1");
   lp_build_name(step2_ptr, "step2");

   if (key->occlusion_count) {
      counter = LLVMGetParam(function, 15);
      lp_build_name(counter, "counter");
   }

   block = LLVMAppendBasicBlock(function, "entry");
   builder = LLVMCreateBuilder();
   LLVMPositionBuilderAtEnd(builder, block);

   /*
    * The shader input interpolation info is not explicitly baked in the
    * shader key, but everything it derives from (TGSI, and flatshade) is
    * already included in the shader key.
    */
   lp_build_interp_soa_init(&interp,
                            shader->base.tokens,
                            key->flatshade,
                            builder, fs_type,
                            a0_ptr, dadx_ptr, dady_ptr,
                            x, y);

   /* code generated texture sampling */
   sampler = lp_llvm_sampler_soa_create(key->sampler, context_ptr);

   /* loop over quads in the block */
   for(i = 0; i < num_fs; ++i) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
      LLVMValueRef out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS];
      LLVMValueRef depth_ptr_i;

      lp_build_interp_soa_update(&interp, i);

      depth_ptr_i = LLVMBuildGEP(builder, depth_ptr, &index, 1, "");

      generate_fs(lp, shader, key,
                  builder,
                  fs_type,
                  context_ptr,
                  i,
                  &interp,
                  sampler,
                  &fs_mask[i], /* output */
                  out_color,
                  depth_ptr_i,
                  facing,
                  do_tri_test,
                  c0, c1, c2,
                  step0_ptr, step1_ptr, step2_ptr,
                  counter);
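
      /*
       * Stash this quad's shader output colors; they are converted to the
       * blend type per channel, four quads at a time, in the blending loop
       * below.
       */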
      for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++)
         for(chan = 0; chan < NUM_CHANNELS; ++chan)
            fs_out_color[cbuf][chan][i] = out_color[cbuf][chan];
   }

   sampler->destroy(sampler);

   /* Loop over color outputs / color buffers to do blending.
    */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      LLVMValueRef color_ptr;
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), cbuf, 0);
      LLVMValueRef blend_in_color[NUM_CHANNELS];

      /*
       * Convert the fs's output color and mask to fit to the blending type.
       */
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         lp_build_conv(builder, fs_type, blend_type,
                       fs_out_color[cbuf][chan], num_fs,
                       &blend_in_color[chan], 1);
         lp_build_name(blend_in_color[chan], "color%d.%c", cbuf, "rgba"[chan]);
      }

      lp_build_conv_mask(builder, fs_type, blend_type,
                         fs_mask, num_fs,
                         &blend_mask, 1);

      color_ptr = LLVMBuildLoad(builder,
                                LLVMBuildGEP(builder, color_ptr_ptr, &index, 1, ""),
                                "");
      lp_build_name(color_ptr, "color_ptr%d", cbuf);

      /* which blend/colormask state to use */
      rt = key->blend.independent_blend_enable ? cbuf : 0;

      generate_blend(&key->blend,
                     rt,
                     builder,
                     blend_type,
                     context_ptr,
                     blend_mask,
                     blend_in_color,
                     color_ptr);
   }

   LLVMBuildRetVoid(builder);

   LLVMDisposeBuilder(builder);
   /* Verify the LLVM IR.  If invalid, dump and abort */
   if(LLVMVerifyFunction(function, LLVMPrintMessageAction)) {
      lp_debug_dump_value(function);
      abort();
   }

   /* Apply optimizations to LLVM IR */
   LLVMRunFunctionPassManager(screen->pass, function);

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      /* Print the LLVM IR to stderr */
      lp_debug_dump_value(function);
   }

   /*
    * Translate the LLVM IR into machine code.
    */
   void *f = LLVMGetPointerToGlobal(screen->engine, function);

   variant->jit_function[do_tri_test] = (lp_jit_frag_func)pointer_to_func(f);

   if (gallivm_debug & GALLIVM_DEBUG_ASM) {

static void
dump_fs_variant_key(const struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   debug_printf("fs variant %p:\n", (void *) key);

   if (key->depth.enabled) {
      debug_printf("depth.format = %s\n", util_format_name(key->zsbuf_format));
      debug_printf("depth.func = %s\n", util_dump_func(key->depth.func, TRUE));
      debug_printf("depth.writemask = %u\n", key->depth.writemask);
   }

   for (i = 0; i < 2; ++i) {
      if (key->stencil[i].enabled) {
         debug_printf("stencil[%u].func = %s\n", i, util_dump_func(key->stencil[i].func, TRUE));
         debug_printf("stencil[%u].fail_op = %s\n", i, util_dump_stencil_op(key->stencil[i].fail_op, TRUE));
         debug_printf("stencil[%u].zpass_op = %s\n", i, util_dump_stencil_op(key->stencil[i].zpass_op, TRUE));
         debug_printf("stencil[%u].zfail_op = %s\n", i, util_dump_stencil_op(key->stencil[i].zfail_op, TRUE));
         debug_printf("stencil[%u].valuemask = 0x%x\n", i, key->stencil[i].valuemask);
         debug_printf("stencil[%u].writemask = 0x%x\n", i, key->stencil[i].writemask);
      }
   }

   if (key->alpha.enabled) {
      debug_printf("alpha.func = %s\n", util_dump_func(key->alpha.func, TRUE));
      debug_printf("alpha.ref_value = %f\n", key->alpha.ref_value);
   }

   if (key->blend.logicop_enable) {
      debug_printf("blend.logicop_func = %s\n", util_dump_logicop(key->blend.logicop_func, TRUE));
   }
   else if (key->blend.rt[0].blend_enable) {
      debug_printf("blend.rgb_func = %s\n",         util_dump_blend_func  (key->blend.rt[0].rgb_func, TRUE));
      debug_printf("blend.rgb_src_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
      debug_printf("blend.rgb_dst_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
      debug_printf("blend.alpha_func = %s\n",       util_dump_blend_func  (key->blend.rt[0].alpha_func, TRUE));
      debug_printf("blend.alpha_src_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
      debug_printf("blend.alpha_dst_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
   }
   debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
   for (i = 0; i < PIPE_MAX_SAMPLERS; ++i) {
      if (key->sampler[i].format) {
         debug_printf("sampler[%u] =\n", i);
         debug_printf("  .format = %s\n",
                      util_format_name(key->sampler[i].format));
         debug_printf("  .target = %s\n",
                      util_dump_tex_target(key->sampler[i].target, TRUE));
         debug_printf("  .pot = %u %u %u\n",
                      key->sampler[i].pot_width,
                      key->sampler[i].pot_height,
                      key->sampler[i].pot_depth);
         debug_printf("  .wrap = %s %s %s\n",
                      util_dump_tex_wrap(key->sampler[i].wrap_s, TRUE),
                      util_dump_tex_wrap(key->sampler[i].wrap_t, TRUE),
                      util_dump_tex_wrap(key->sampler[i].wrap_r, TRUE));
         debug_printf("  .min_img_filter = %s\n",
                      util_dump_tex_filter(key->sampler[i].min_img_filter, TRUE));
         debug_printf("  .min_mip_filter = %s\n",
                      util_dump_tex_mipfilter(key->sampler[i].min_mip_filter, TRUE));
         debug_printf("  .mag_img_filter = %s\n",
                      util_dump_tex_filter(key->sampler[i].mag_img_filter, TRUE));
         if (key->sampler[i].compare_mode != PIPE_TEX_COMPARE_NONE)
            debug_printf("  .compare_func = %s\n", util_dump_func(key->sampler[i].compare_func, TRUE));
         debug_printf("  .normalized_coords = %u\n", key->sampler[i].normalized_coords);
      }
   }
}

static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key)
{
   struct lp_fragment_shader_variant *variant;

   variant = CALLOC_STRUCT(lp_fragment_shader_variant);
   if (!variant)
      return NULL;

   variant->no = shader->variant_no++;

   memcpy(&variant->key, key, sizeof *key);

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      debug_printf("llvmpipe: Creating fragment shader #%u variant #%u:\n",
                   shader->no, variant->no);
      tgsi_dump(shader->base.tokens, 0);
      dump_fs_variant_key(key);
   }

   generate_fragment(lp, shader, variant, RAST_WHOLE);
   generate_fragment(lp, shader, variant, RAST_EDGE_TEST);

   /* TODO: most of these can be relaxed, in particular the colormask */
      !key->blend.logicop_enable &&
      !key->blend.rt[0].blend_enable &&
      key->blend.rt[0].colormask == 0xf &&
      !key->stencil[0].enabled &&
      !key->alpha.enabled &&
      !key->depth.enabled &&
      !shader->info.uses_kill

   /* insert new variant into linked list */
   variant->next = shader->variants;
   shader->variants = variant;

   return variant;
}

static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct lp_fragment_shader *shader;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   shader->no = fs_no++;

   /* get/save the summary info for this shader */
   tgsi_scan_shader(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   if (LP_DEBUG & DEBUG_TGSI) {
      unsigned attrib;
      debug_printf("llvmpipe: Create fragment shader #%u %p:\n", shader->no, (void *) shader);
      tgsi_dump(templ->tokens, 0);
      debug_printf("usage masks:\n");
      for (attrib = 0; attrib < shader->info.num_inputs; ++attrib) {
         unsigned usage_mask = shader->info.input_usage_mask[attrib];
         debug_printf("  IN[%u].%s%s%s%s\n",
                      attrib,
                      usage_mask & TGSI_WRITEMASK_X ? "x" : "",
                      usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
                      usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
                      usage_mask & TGSI_WRITEMASK_W ? "w" : "");
      }
   }

   return shader;
}

static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->fs == fs)
      return;

   draw_flush(llvmpipe->draw);

   llvmpipe->fs = fs;

   llvmpipe->dirty |= LP_NEW_FS;
}

static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_fragment_shader *shader = fs;
   struct lp_fragment_shader_variant *variant;

   assert(fs != llvmpipe->fs);

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders as they may still be binned
    */
   draw_flush(llvmpipe->draw);
   lp_setup_flush(llvmpipe->setup, 0);

   variant = shader->variants;
   while (variant) {
      struct lp_fragment_shader_variant *next = variant->next;
      unsigned i;

      for (i = 0; i < Elements(variant->function); i++) {
         if (variant->function[i]) {
            if (variant->jit_function[i])
               LLVMFreeMachineCodeForFunction(screen->engine,
                                              variant->function[i]);
            LLVMDeleteFunction(variant->function[i]);
         }
      }

      FREE(variant);

      variant = next;
   }

   FREE((void *) shader->base.tokens);
   FREE(shader);
}

static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             struct pipe_resource *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned size = constants ? constants->width0 : 0;
   const void *data = constants ? llvmpipe_resource_data(constants) : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index < PIPE_MAX_CONSTANT_BUFFERS);

   if(llvmpipe->constants[shader][index] == constants)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_resource_reference(&llvmpipe->constants[shader][index], constants);

   if(shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, PIPE_SHADER_VERTEX, index,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}

/**
 * Return the blend factor equivalent to a destination alpha of one.
 */
static INLINE unsigned
force_dst_alpha_one(unsigned factor, boolean alpha)
{
   switch(factor) {
   case PIPE_BLENDFACTOR_DST_ALPHA:
      return PIPE_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_INV_DST_ALPHA:
      return PIPE_BLENDFACTOR_ZERO;
   case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
      return PIPE_BLENDFACTOR_ZERO;
   }

   if(alpha) {
      switch(factor) {
      case PIPE_BLENDFACTOR_DST_COLOR:
         return PIPE_BLENDFACTOR_ONE;
      case PIPE_BLENDFACTOR_INV_DST_COLOR:
         return PIPE_BLENDFACTOR_ZERO;
      }
   }

   return factor;
}

/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 struct lp_fragment_shader_variant_key *key)
{
   unsigned i;
   unsigned chan;

   memset(key, 0, sizeof *key);

   if (lp->framebuffer.zsbuf) {
      if (lp->depth_stencil->depth.enabled) {
         key->zsbuf_format = lp->framebuffer.zsbuf->format;
         memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
      }
      if (lp->depth_stencil->stencil[0].enabled) {
         key->zsbuf_format = lp->framebuffer.zsbuf->format;
         memcpy(&key->stencil, &lp->depth_stencil->stencil, sizeof key->stencil);
      }
   }

   key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   if(key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;
   key->scissor = lp->rasterizer->scissor;
   if (lp->active_query_count) {
      key->occlusion_count = TRUE;
   }

   if (lp->framebuffer.nr_cbufs) {
      memcpy(&key->blend, lp->blend, sizeof key->blend);
   }

   key->nr_cbufs = lp->framebuffer.nr_cbufs;
   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];
      const struct util_format_description *format_desc;

      format_desc = util_format_description(lp->framebuffer.cbufs[i]->format);
      assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
             format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

      blend_rt->colormask = lp->blend->rt[i].colormask;

      /* mask out color channels not present in the color buffer.
       * Should be simple to incorporate per-cbuf writemasks.
       */
      for(chan = 0; chan < 4; ++chan) {
         enum util_format_swizzle swizzle = format_desc->swizzle[chan];

         if(swizzle > UTIL_FORMAT_SWIZZLE_W)
            blend_rt->colormask &= ~(1 << chan);
      }

      /*
       * Our swizzled render tiles always have an alpha channel, but the linear
       * render target format often does not, so force here the dst alpha to be
       * one.
       *
       * This is not a mere optimization. Wrong results will be produced if the
       * dst alpha is used, the dst format does not have alpha, and the previous
       * rendering was not flushed from the swizzled to linear buffer. For
       * example, NonPowTwo DCT.
       *
       * TODO: This should be generalized to all channels for better
       * performance, but only alpha causes correctness issues.
       */
      if (format_desc->swizzle[3] > UTIL_FORMAT_SWIZZLE_W) {
         blend_rt->rgb_src_factor = force_dst_alpha_one(blend_rt->rgb_src_factor, FALSE);
         blend_rt->rgb_dst_factor = force_dst_alpha_one(blend_rt->rgb_dst_factor, FALSE);
         blend_rt->alpha_src_factor = force_dst_alpha_one(blend_rt->alpha_src_factor, TRUE);
         blend_rt->alpha_dst_factor = force_dst_alpha_one(blend_rt->alpha_dst_factor, TRUE);
      }
   }

   for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
      if(shader->info.file_mask[TGSI_FILE_SAMPLER] & (1 << i))
         lp_sampler_static_state(&key->sampler[i], lp->fragment_sampler_views[i], lp->sampler[i]);
}

/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant;

   make_variant_key(lp, shader, &key);

   variant = shader->variants;
   while (variant) {
      if(memcmp(&variant->key, &key, sizeof key) == 0)
         break;

      variant = variant->next;
   }

   if (!variant) {
      variant = generate_variant(lp, shader, &key);

      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */
   }

   lp_setup_set_fs_variant(lp->setup, variant);
}

void
llvmpipe_init_fs_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
   llvmpipe->pipe.bind_fs_state = llvmpipe_bind_fs_state;
   llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;

   llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
}
;