/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file
 * Code generate the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 * - triangle edge in/out testing
 * - early depth test
 * - fragment shader
 * - alpha test
 * - depth/stencil test (stencil TBI)
 * - blending
 *
 * This file has only the glue to assemble the fragment pipeline. The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, and in a completely generic and reusable way. Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different precisions and data types. For example, the fragment
 * shader typically needs to be done in floats, but the depth/stencil test and
 * blending are better done in the type that most closely matches the
 * depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply a different number of elements, so we
 * must code generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
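/*
 * Illustrative example (added commentary, not from the original header): with
 * 128-bit SIMD registers a vector of 32-bit floats holds 4 elements while a
 * vector of 8-bit unorm color values holds 16, so four 4-wide shader steps
 * are generated to feed a single 16-wide blending step.
 */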
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_debug_dump.h"
#include "pipe/internal/p_winsys_screen.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_intr.h"
#include "lp_bld_logic.h"
#include "lp_bld_depth.h"
#include "lp_bld_interp.h"
#include "lp_bld_tgsi.h"
#include "lp_bld_alpha.h"
#include "lp_bld_blend.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_flow.h"
#include "lp_bld_debug.h"
#include "lp_screen.h"
#include "lp_context.h"
#include "lp_buffer.h"
#include "lp_debug.h"
#include "lp_jit.h"
#include "lp_state.h"
#include "lp_setup.h"
#include "lp_tex_sample.h"
static const unsigned char quad_offset_x[4] = {0, 1, 0, 1};
static const unsigned char quad_offset_y[4] = {0, 0, 1, 1};
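/*
 * These offsets place the four pixels of a quad at (0,0), (1,0), (0,1) and
 * (1,1) relative to the quad's upper-left pixel; generate_pos0() below adds
 * them to the quad's scalar coordinates.
 */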
/**
 * Derive from the quad's upper left scalar coordinates the coordinates for
 * all other quad pixels
 */
static void
generate_pos0(LLVMBuilderRef builder,
              LLVMValueRef x,
              LLVMValueRef y,
              LLVMValueRef *x0,
              LLVMValueRef *y0)
{
   LLVMTypeRef int_elem_type = LLVMInt32Type();
   LLVMTypeRef int_vec_type = LLVMVectorType(int_elem_type, QUAD_SIZE);
   LLVMTypeRef elem_type = LLVMFloatType();
   LLVMTypeRef vec_type = LLVMVectorType(elem_type, QUAD_SIZE);
   LLVMValueRef x_offsets[QUAD_SIZE];
   LLVMValueRef y_offsets[QUAD_SIZE];
   unsigned i;

   /* Broadcast the scalar coordinates across a 4-wide vector. */
   x = lp_build_broadcast(builder, int_vec_type, x);
   y = lp_build_broadcast(builder, int_vec_type, y);

   for(i = 0; i < QUAD_SIZE; ++i) {
      x_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_x[i], 0);
      y_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_y[i], 0);
   }

   /* Add the per-pixel offsets, then convert to floats. */
   x = LLVMBuildAdd(builder, x, LLVMConstVector(x_offsets, QUAD_SIZE), "");
   y = LLVMBuildAdd(builder, y, LLVMConstVector(y_offsets, QUAD_SIZE), "");

   *x0 = LLVMBuildSIToFP(builder, x, vec_type, "");
   *y0 = LLVMBuildSIToFP(builder, y, vec_type, "");
}
/**
 * Generate the depth test.
 */
static void
generate_depth(LLVMBuilderRef builder,
               const struct lp_fragment_shader_variant_key *key,
               struct lp_type src_type,
               struct lp_build_mask_context *mask,
               LLVMValueRef src,
               LLVMValueRef dst_ptr)
{
   const struct util_format_description *format_desc;
   struct lp_type dst_type;

   if(!key->depth.enabled)
      return;

   format_desc = util_format_description(key->zsbuf_format);
   assert(format_desc);

   /*
    * Depths are expected to be between 0 and 1, even if they are stored in
    * floats. Setting these bits here will ensure that the lp_build_conv() call
    * below won't try to unnecessarily clamp the incoming values.
    */
   if(src_type.floating) {
      src_type.sign = FALSE;
      src_type.norm = TRUE;
   }
   else {
      assert(!src_type.sign);
      assert(src_type.norm);
   }

   /* Pick the depth type. */
   dst_type = lp_depth_type(format_desc, src_type.width*src_type.length);

   /* FIXME: Cope with a depth test type with a different bit width. */
   assert(dst_type.width == src_type.width);
   assert(dst_type.length == src_type.length);

   lp_build_conv(builder, src_type, dst_type, &src, 1, &src, 1);

   dst_ptr = LLVMBuildBitCast(builder,
                              dst_ptr,
                              LLVMPointerType(lp_build_vec_type(dst_type), 0), "");

   lp_build_depth_test(builder,
                       &key->depth,
                       dst_type,
                       format_desc,
                       mask,
                       src,
                       dst_ptr);
}
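/*
 * For instance (added, illustrative, assuming lp_depth_type() picks a type
 * matching the buffer's storage): with a 4-wide 32-bit float source and a
 * PIPE_FORMAT_Z32_UNORM depth buffer, dst_type becomes a 4-wide 32-bit
 * unsigned normalized type, satisfying the width/length asserts above.
 */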
/**
 * Generate the code to do inside/outside triangle testing for the
 * four pixels in a 2x2 quad. This will set the four elements of the
 * quad mask vector to 0 or ~0.
 * \param i  which quad of the quad group to test, in [0,3]
 */
static void
generate_tri_edge_mask(LLVMBuilderRef builder,
                       unsigned i,
                       LLVMValueRef *mask,      /* ivec4, out */
                       LLVMValueRef c0,         /* int32 */
                       LLVMValueRef c1,         /* int32 */
                       LLVMValueRef c2,         /* int32 */
                       LLVMValueRef step0_ptr,  /* ivec4 */
                       LLVMValueRef step1_ptr,  /* ivec4 */
                       LLVMValueRef step2_ptr)  /* ivec4 */
{
   /*
    * Implements the following logic:
    *
    *   m0_vec = step0_ptr[i] > c0_vec
    *   m1_vec = step1_ptr[i] > c1_vec
    *   m2_vec = step2_ptr[i] > c2_vec
    *   mask = m0_vec & m1_vec & m2_vec
    */
   struct lp_build_flow_context *flow;
   struct lp_build_if_state ifctx;
   struct lp_type i32_type;
   LLVMTypeRef i32vec4_type, mask_type;
   LLVMValueRef c0_vec, c1_vec, c2_vec;
   LLVMValueRef not_draw_all;
   LLVMValueRef in_out_mask;

   /* int32 vector type */
   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */

   i32vec4_type = lp_build_int32_vec4_type();

   mask_type = LLVMIntType(32 * 4);
   /*
    * Use a conditional here to do detailed pixel in/out testing.
    * We only have to do this if c0 != {INT_MIN, INT_MIN, INT_MIN, INT_MIN}
    */

   flow = lp_build_flow_create(builder);
   lp_build_flow_scope_begin(flow);
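   /*
    * Note (added commentary, inferred from this code rather than original
    * comments): INT_MIN acts as a sentinel. When the caller passes
    * c0 == INT_MIN the block is treated as fully covered -- "step > INT_MIN"
    * would be trivially true for every pixel, so the detailed test is skipped
    * and in_out_mask keeps its initial ~0 (all pixels alive) value.
    */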
#define OPTIMIZE_IN_OUT_TEST 1
#if OPTIMIZE_IN_OUT_TEST
   /* not_draw_all = (c0 != INT_MIN) */
   not_draw_all = LLVMBuildICmp(builder,
                                LLVMIntNE,
                                c0,
                                LLVMConstInt(LLVMInt32Type(), INT_MIN, 0),
                                "");

   /* default: all pixels alive */
   in_out_mask = lp_build_int_const_scalar(i32_type, ~0);

   lp_build_flow_scope_declare(flow, &in_out_mask);

   lp_build_if(&ifctx, flow, builder, not_draw_all);
#endif
   {
      LLVMValueRef step0_vec, step1_vec, step2_vec;
      LLVMValueRef m0_vec, m1_vec, m2_vec;
      LLVMValueRef index, m;
      /*
       * c0_vec = {c0, c0, c0, c0}
       * Note that we emit this code four times but LLVM optimizes away
       * three instances of it.
       */
      c0_vec = lp_build_broadcast(builder, i32vec4_type, c0);
      c1_vec = lp_build_broadcast(builder, i32vec4_type, c1);
      c2_vec = lp_build_broadcast(builder, i32vec4_type, c2);
      lp_build_name(c0_vec, "edgeconst0vec");
      lp_build_name(c1_vec, "edgeconst1vec");
      lp_build_name(c2_vec, "edgeconst2vec");
      /* load the step vectors for this quad */
      index = LLVMConstInt(LLVMInt32Type(), i, 0);
      step0_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step0_ptr, &index, 1, ""), "");
      step1_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step1_ptr, &index, 1, ""), "");
      step2_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step2_ptr, &index, 1, ""), "");

      lp_build_name(step0_vec, "step0vec");
      lp_build_name(step1_vec, "step1vec");
      lp_build_name(step2_vec, "step2vec");
      /* per-pixel compares: each element of m*_vec is 0 or ~0 */
      m0_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step0_vec, c0_vec);
      m1_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step1_vec, c1_vec);
      m2_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step2_vec, c2_vec);

      m = LLVMBuildAnd(builder, m0_vec, m1_vec, "");
      in_out_mask = LLVMBuildAnd(builder, m, m2_vec, "");
      lp_build_name(in_out_mask, "inoutmaskvec");
   }
   /* This is the initial alive/dead pixel mask. Additional bits will get cleared
    * when the Z test fails, etc.
    */
#if OPTIMIZE_IN_OUT_TEST
   lp_build_endif(&ifctx);
#endif

   *mask = in_out_mask;

   lp_build_flow_scope_end(flow);
   lp_build_flow_destroy(flow);
}
/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 * \param i  which quad in the tile, in range [0,3]
 */
static void
generate_fs(struct llvmpipe_context *lp,
            struct lp_fragment_shader *shader,
            const struct lp_fragment_shader_variant_key *key,
            LLVMBuilderRef builder,
            struct lp_type type,
            LLVMValueRef context_ptr,
            unsigned i,
            const struct lp_build_interp_soa_context *interp,
            struct lp_build_sampler_soa *sampler,
            LLVMValueRef *pmask,
            LLVMValueRef (*color)[4],
            LLVMValueRef depth_ptr,
            LLVMValueRef c0,
            LLVMValueRef c1,
            LLVMValueRef c2,
            LLVMValueRef step0_ptr,
            LLVMValueRef step1_ptr,
            LLVMValueRef step2_ptr)
{
   const struct tgsi_token *tokens = shader->base.tokens;
   LLVMTypeRef elem_type;
   LLVMTypeRef vec_type;
   LLVMTypeRef int_vec_type;
   LLVMValueRef consts_ptr;
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][NUM_CHANNELS];
   LLVMValueRef z = interp->pos[2];
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask;
   boolean early_depth_test;
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;
   elem_type = lp_build_elem_type(type);
   vec_type = lp_build_vec_type(type);
   int_vec_type = lp_build_int_vec_type(type);

   consts_ptr = lp_jit_context_constants(builder, context_ptr);

   flow = lp_build_flow_create(builder);

   memset(outputs, 0, sizeof outputs);

   lp_build_flow_scope_begin(flow);

   /* Declare the color and z variables */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         color[cbuf][chan] = LLVMGetUndef(vec_type);
         lp_build_flow_scope_declare(flow, &color[cbuf][chan]);
      }
   }
   lp_build_flow_scope_declare(flow, &z);
   /* do triangle edge testing */
   generate_tri_edge_mask(builder, i, pmask,
                          c0, c1, c2, step0_ptr, step1_ptr, step2_ptr);

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, flow, type, *pmask);
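   /*
    * Note (added commentary): early depth testing is only safe when the
    * shader itself cannot change the outcome -- no alpha test or KIL that
    * could kill fragments after depth has been written, and no shader-written
    * Z that would replace the interpolated value being tested.
    */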
   early_depth_test =
      key->depth.enabled &&
      !key->alpha.enabled &&
      !shader->info.uses_kill &&
      !shader->info.writes_z;

   if(early_depth_test)
      generate_depth(builder, key,
                     type, &mask,
                     z, depth_ptr);
   /* Build the fragment shader body */
   lp_build_tgsi_soa(builder, tokens, type, &mask,
                     consts_ptr, interp->pos, interp->inputs,
                     outputs, sampler);
   for (attrib = 0; attrib < shader->info.num_outputs; ++attrib) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         if(outputs[attrib][chan]) {
            lp_build_name(outputs[attrib][chan], "output%u.%u.%c", i, attrib, "xyzw"[chan]);

            switch (shader->info.output_semantic_name[attrib]) {
            case TGSI_SEMANTIC_COLOR:
               {
                  unsigned cbuf = shader->info.output_semantic_index[attrib];

                  lp_build_name(outputs[attrib][chan], "color%u.%u.%c", i, attrib, "rgba"[chan]);

                  /* Alpha test */
                  /* XXX: should the alpha reference value be passed separately? */
                  /* XXX: should only test the final assignment to alpha */
                  if(cbuf == 0 && chan == 3) {
                     LLVMValueRef alpha = outputs[attrib][chan];
                     LLVMValueRef alpha_ref_value;
                     alpha_ref_value = lp_jit_context_alpha_ref_value(builder, context_ptr);
                     alpha_ref_value = lp_build_broadcast(builder, vec_type, alpha_ref_value);
                     lp_build_alpha_test(builder, &key->alpha, type,
                                         &mask, alpha, alpha_ref_value);
                  }

                  color[cbuf][chan] = outputs[attrib][chan];
                  break;
               }

            case TGSI_SEMANTIC_POSITION:
               if(chan == 2)
                  z = outputs[attrib][chan];
               break;
            }
         }
      }
   }
   if(!early_depth_test)
      generate_depth(builder, key,
                     type, &mask,
                     z, depth_ptr);

   lp_build_mask_end(&mask);

   lp_build_flow_scope_end(flow);

   lp_build_flow_destroy(flow);

   *pmask = mask.value;
}
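/*
 * Summary (added commentary): generate_fs() realizes the per-quad part of the
 * pipeline in this order: triangle edge mask, optional early depth test,
 * fragment shader proper, alpha test on the first color output, then the
 * late depth test when early testing was not possible.
 */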
/**
 * Generate color blending and color output.
 */
static void
generate_blend(const struct pipe_blend_state *blend,
               LLVMBuilderRef builder,
               struct lp_type type,
               LLVMValueRef context_ptr,
               LLVMValueRef mask,
               LLVMValueRef *src,
               LLVMValueRef dst_ptr)
{
   struct lp_build_context bld;
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask_ctx;
   LLVMTypeRef vec_type;
   LLVMTypeRef int_vec_type;
   LLVMValueRef const_ptr;
   LLVMValueRef con[4];
   LLVMValueRef dst[4];
   LLVMValueRef res[4];
   unsigned chan;
   lp_build_context_init(&bld, builder, type);

   flow = lp_build_flow_create(builder);

   /* we'll use this mask context to skip blending if all pixels are dead */
   lp_build_mask_begin(&mask_ctx, flow, type, mask);

   vec_type = lp_build_vec_type(type);
   int_vec_type = lp_build_int_vec_type(type);

   const_ptr = lp_jit_context_blend_color(builder, context_ptr);
   const_ptr = LLVMBuildBitCast(builder, const_ptr,
                                LLVMPointerType(vec_type, 0), "");
   for(chan = 0; chan < 4; ++chan) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
      con[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");

      dst[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");

      lp_build_name(con[chan], "con.%c", "rgba"[chan]);
      lp_build_name(dst[chan], "dst.%c", "rgba"[chan]);
   }
   lp_build_blend_soa(builder, blend, type, src, dst, con, res);
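   /*
    * Note (added commentary): src, dst, con and res are SoA arrays of four
    * channel vectors (r, g, b, a), each vector covering all pixels at once;
    * lp_build_blend_soa() emits the blend equation once per channel.
    */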
   for(chan = 0; chan < 4; ++chan) {
      if(blend->colormask & (1 << chan)) {
         LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
         lp_build_name(res[chan], "res.%c", "rgba"[chan]);
         res[chan] = lp_build_select(&bld, mask, res[chan], dst[chan]);
         LLVMBuildStore(builder, res[chan], LLVMBuildGEP(builder, dst_ptr, &index, 1, ""));
      }
   }

   lp_build_mask_end(&mask_ctx);
   lp_build_flow_destroy(flow);
}
/**
 * Generate the runtime callable function for the whole fragment pipeline.
 * Note that the function which we generate operates on a block of 16
 * pixels at a time. The block contains 2x2 quads. Each quad contains
 * 2x2 pixels.
 */
static void
generate_fragment(struct llvmpipe_context *lp,
                  struct lp_fragment_shader *shader,
                  struct lp_fragment_shader_variant *variant)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   const struct lp_fragment_shader_variant_key *key = &variant->key;
   struct lp_type fs_type;
   struct lp_type blend_type;
   LLVMTypeRef fs_elem_type;
   LLVMTypeRef fs_vec_type;
   LLVMTypeRef fs_int_vec_type;
   LLVMTypeRef blend_vec_type;
   LLVMTypeRef blend_int_vec_type;
   LLVMTypeRef arg_types[14];
   LLVMTypeRef func_type;
   LLVMTypeRef int32_vec4_type = lp_build_int32_vec4_type();
   LLVMValueRef context_ptr;
   LLVMValueRef x;
   LLVMValueRef y;
   LLVMValueRef a0_ptr;
   LLVMValueRef dadx_ptr;
   LLVMValueRef dady_ptr;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef depth_ptr;
   LLVMValueRef c0, c1, c2, step0_ptr, step1_ptr, step2_ptr;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   LLVMValueRef x0;
   LLVMValueRef y0;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_interp_soa_context interp;
   LLVMValueRef fs_mask[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][LP_MAX_VECTOR_LENGTH];
   LLVMValueRef blend_mask;
   LLVMValueRef blend_in_color[NUM_CHANNELS];
   unsigned num_fs;
   unsigned i;
   unsigned chan;
   unsigned cbuf;
   /* TODO: actually pick these based on the fs and color buffer
    * characteristics. */

   memset(&fs_type, 0, sizeof fs_type);
   fs_type.floating = TRUE; /* floating point values */
   fs_type.sign = TRUE;     /* values are signed */
   fs_type.norm = FALSE;    /* values are not limited to [0,1] or [-1,1] */
   fs_type.width = 32;      /* 32-bit float */
   fs_type.length = 4;      /* 4 elements per vector */
   num_fs = 4;              /* number of quads per block */

   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE; /* values are integers */
   blend_type.sign = FALSE;     /* values are unsigned */
   blend_type.norm = TRUE;      /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;        /* 8-bit ubyte values */
   blend_type.length = 16;      /* 16 elements per vector */
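   /*
    * Worked numbers (added commentary): the shader runs num_fs = 4 quads of
    * 4 floats per channel (16 values total), which lp_build_conv() below
    * folds into a single 16-element vector of 8-bit unorm values -- exactly
    * one blend_type vector per color channel.
    */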
   /*
    * Generate the function prototype. Any change here must be reflected in
    * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
    */

   fs_elem_type = lp_build_elem_type(fs_type);
   fs_vec_type = lp_build_vec_type(fs_type);
   fs_int_vec_type = lp_build_int_vec_type(fs_type);

   blend_vec_type = lp_build_vec_type(blend_type);
   blend_int_vec_type = lp_build_int_vec_type(blend_type);

   arg_types[0] = screen->context_ptr_type;            /* context */
   arg_types[1] = LLVMInt32Type();                     /* x */
   arg_types[2] = LLVMInt32Type();                     /* y */
   arg_types[3] = LLVMPointerType(fs_elem_type, 0);    /* a0 */
   arg_types[4] = LLVMPointerType(fs_elem_type, 0);    /* dadx */
   arg_types[5] = LLVMPointerType(fs_elem_type, 0);    /* dady */
   arg_types[6] = LLVMPointerType(LLVMPointerType(blend_vec_type, 0), 0);  /* color */
   arg_types[7] = LLVMPointerType(fs_int_vec_type, 0); /* depth */
   arg_types[8] = LLVMInt32Type();                     /* c0 */
   arg_types[9] = LLVMInt32Type();                     /* c1 */
   arg_types[10] = LLVMInt32Type();                    /* c2 */
   /* Note: the step arrays are built as int32[16] but we interpret
    * them here as int32_vec4[4].
    */
   arg_types[11] = LLVMPointerType(int32_vec4_type, 0);/* step0 */
   arg_types[12] = LLVMPointerType(int32_vec4_type, 0);/* step1 */
   arg_types[13] = LLVMPointerType(int32_vec4_type, 0);/* step2 */

   func_type = LLVMFunctionType(LLVMVoidType(), arg_types, Elements(arg_types), 0);
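   /*
    * Sketch (added, assuming plain C equivalents of the LLVM types above --
    * the authoritative typedef is lp_jit_frag_func in lp_jit.h):
    *
    *    void
    *    shader(struct lp_jit_context *context,
    *           int32_t x, int32_t y,
    *           const float *a0, const float *dadx, const float *dady,
    *           uint8_t **color,
    *           int32_t *depth,
    *           int32_t c0, int32_t c1, int32_t c2,
    *           const int32_t *step0, const int32_t *step1,
    *           const int32_t *step2);
    */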
   variant->function = LLVMAddFunction(screen->module, "shader", func_type);
   LLVMSetFunctionCallConv(variant->function, LLVMCCallConv);

   /* XXX: need to propagate noalias down into color param now we are
    * passing a pointer-to-pointer?
    */
   for(i = 0; i < Elements(arg_types); ++i)
      if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
         LLVMAddAttribute(LLVMGetParam(variant->function, i), LLVMNoAliasAttribute);
   context_ptr = LLVMGetParam(variant->function, 0);
   x = LLVMGetParam(variant->function, 1);
   y = LLVMGetParam(variant->function, 2);
   a0_ptr = LLVMGetParam(variant->function, 3);
   dadx_ptr = LLVMGetParam(variant->function, 4);
   dady_ptr = LLVMGetParam(variant->function, 5);
   color_ptr_ptr = LLVMGetParam(variant->function, 6);
   depth_ptr = LLVMGetParam(variant->function, 7);
   c0 = LLVMGetParam(variant->function, 8);
   c1 = LLVMGetParam(variant->function, 9);
   c2 = LLVMGetParam(variant->function, 10);
   step0_ptr = LLVMGetParam(variant->function, 11);
   step1_ptr = LLVMGetParam(variant->function, 12);
   step2_ptr = LLVMGetParam(variant->function, 13);
   lp_build_name(context_ptr, "context");
   lp_build_name(x, "x");
   lp_build_name(y, "y");
   lp_build_name(a0_ptr, "a0");
   lp_build_name(dadx_ptr, "dadx");
   lp_build_name(dady_ptr, "dady");
   lp_build_name(color_ptr_ptr, "color_ptr");
   lp_build_name(depth_ptr, "depth");
   lp_build_name(c0, "c0");
   lp_build_name(c1, "c1");
   lp_build_name(c2, "c2");
   lp_build_name(step0_ptr, "step0");
   lp_build_name(step1_ptr, "step1");
   lp_build_name(step2_ptr, "step2");
   /*
    * Function body
    */

   block = LLVMAppendBasicBlock(variant->function, "entry");
   builder = LLVMCreateBuilder();
   LLVMPositionBuilderAtEnd(builder, block);

   generate_pos0(builder, x, y, &x0, &y0);

   lp_build_interp_soa_init(&interp,
                            shader->base.tokens,
                            key->flatshade,
                            builder, fs_type,
                            a0_ptr, dadx_ptr, dady_ptr,
                            x0, y0);
   /* code generated texture sampling */
   sampler = lp_llvm_sampler_soa_create(key->sampler, context_ptr);
   /* loop over quads in the block */
   for(i = 0; i < num_fs; ++i) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
      LLVMValueRef out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS];
      LLVMValueRef depth_ptr_i;

      lp_build_interp_soa_update(&interp, i);

      depth_ptr_i = LLVMBuildGEP(builder, depth_ptr, &index, 1, "");

      generate_fs(lp, shader, key,
                  builder,
                  fs_type,
                  context_ptr,
                  i,
                  &interp,
                  sampler,
                  &fs_mask[i], /* output */
                  out_color,
                  depth_ptr_i,
                  c0, c1, c2,
                  step0_ptr, step1_ptr, step2_ptr);

      for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++)
         for(chan = 0; chan < NUM_CHANNELS; ++chan)
            fs_out_color[cbuf][chan][i] = out_color[cbuf][chan];
   }

   sampler->destroy(sampler);
   /* Loop over color outputs / color buffers to do blending.
    */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      LLVMValueRef color_ptr;
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), cbuf, 0);

      /*
       * Convert the fs's output color and mask to fit to the blending type.
       */
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         lp_build_conv(builder, fs_type, blend_type,
                       fs_out_color[cbuf][chan], num_fs,
                       &blend_in_color[chan], 1);
         lp_build_name(blend_in_color[chan], "color%d.%c", cbuf, "rgba"[chan]);
      }

      lp_build_conv_mask(builder, fs_type, blend_type,
                         fs_mask, num_fs,
                         &blend_mask, 1);

      color_ptr = LLVMBuildLoad(builder,
                                LLVMBuildGEP(builder, color_ptr_ptr, &index, 1, ""),
                                "");
      lp_build_name(color_ptr, "color_ptr%d", cbuf);

      /*
       * Blending.
       */
      generate_blend(&key->blend,
                     builder,
                     blend_type,
                     context_ptr,
                     blend_mask,
                     blend_in_color,
                     color_ptr);
   }
   LLVMBuildRetVoid(builder);

   LLVMDisposeBuilder(builder);
   /* Verify the LLVM IR. If invalid, dump and abort */
   if(LLVMVerifyFunction(variant->function, LLVMPrintMessageAction)) {
      LLVMDumpValue(variant->function);
      abort();
   }

   /* Apply optimizations to LLVM IR */
   LLVMRunFunctionPassManager(screen->pass, variant->function);

   if (LP_DEBUG & DEBUG_JIT) {
      /* Print the LLVM IR to stderr */
      LLVMDumpValue(variant->function);
   }
   /*
    * Translate the LLVM IR into machine code.
    */
   variant->jit_function = (lp_jit_frag_func)LLVMGetPointerToGlobal(screen->engine, variant->function);

   if (LP_DEBUG & DEBUG_ASM)
      lp_disassemble(variant->jit_function);

   /* insert the variant into the shader's linked list of variants */
   variant->next = shader->variants;
   shader->variants = variant;
}
static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key)
{
   struct lp_fragment_shader_variant *variant;

   if (LP_DEBUG & DEBUG_JIT) {
      unsigned i;

      tgsi_dump(shader->base.tokens, 0);
      if(key->depth.enabled) {
         debug_printf("depth.format = %s\n", pf_name(key->zsbuf_format));
         debug_printf("depth.func = %s\n", debug_dump_func(key->depth.func, TRUE));
         debug_printf("depth.writemask = %u\n", key->depth.writemask);
      }
      if(key->alpha.enabled) {
         debug_printf("alpha.func = %s\n", debug_dump_func(key->alpha.func, TRUE));
         debug_printf("alpha.ref_value = %f\n", key->alpha.ref_value);
      }
      if(key->blend.logicop_enable) {
         debug_printf("blend.logicop_func = %u\n", key->blend.logicop_func);
      }
      else if(key->blend.blend_enable) {
         debug_printf("blend.rgb_func = %s\n",   debug_dump_blend_func  (key->blend.rgb_func, TRUE));
         debug_printf("rgb_src_factor = %s\n",   debug_dump_blend_factor(key->blend.rgb_src_factor, TRUE));
         debug_printf("rgb_dst_factor = %s\n",   debug_dump_blend_factor(key->blend.rgb_dst_factor, TRUE));
         debug_printf("alpha_func = %s\n",       debug_dump_blend_func  (key->blend.alpha_func, TRUE));
         debug_printf("alpha_src_factor = %s\n", debug_dump_blend_factor(key->blend.alpha_src_factor, TRUE));
         debug_printf("alpha_dst_factor = %s\n", debug_dump_blend_factor(key->blend.alpha_dst_factor, TRUE));
      }
      debug_printf("blend.colormask = 0x%x\n", key->blend.colormask);
      for(i = 0; i < PIPE_MAX_SAMPLERS; ++i) {
         if(key->sampler[i].format) {
            debug_printf("sampler[%u] = \n", i);
            debug_printf("  .format = %s\n",
                         pf_name(key->sampler[i].format));
            debug_printf("  .target = %s\n",
                         debug_dump_tex_target(key->sampler[i].target, TRUE));
            debug_printf("  .pot = %u %u %u\n",
                         key->sampler[i].pot_width,
                         key->sampler[i].pot_height,
                         key->sampler[i].pot_depth);
            debug_printf("  .wrap = %s %s %s\n",
                         debug_dump_tex_wrap(key->sampler[i].wrap_s, TRUE),
                         debug_dump_tex_wrap(key->sampler[i].wrap_t, TRUE),
                         debug_dump_tex_wrap(key->sampler[i].wrap_r, TRUE));
            debug_printf("  .min_img_filter = %s\n",
                         debug_dump_tex_filter(key->sampler[i].min_img_filter, TRUE));
            debug_printf("  .min_mip_filter = %s\n",
                         debug_dump_tex_mipfilter(key->sampler[i].min_mip_filter, TRUE));
            debug_printf("  .mag_img_filter = %s\n",
                         debug_dump_tex_filter(key->sampler[i].mag_img_filter, TRUE));
            if(key->sampler[i].compare_mode != PIPE_TEX_COMPARE_NONE)
               debug_printf("  .compare_func = %s\n",
                            debug_dump_func(key->sampler[i].compare_func, TRUE));
            debug_printf("  .normalized_coords = %u\n",
                         key->sampler[i].normalized_coords);
            debug_printf("  .prefilter = %u\n",
                         key->sampler[i].prefilter);
         }
      }
   }
   variant = CALLOC_STRUCT(lp_fragment_shader_variant);
   if(!variant)
      return NULL;

   variant->shader = shader;
   memcpy(&variant->key, key, sizeof *key);

   generate_fragment(lp, shader, variant);

   return variant;
}
static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct lp_fragment_shader *shader;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   /* get/save the summary info for this shader */
   tgsi_scan_shader(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   return shader;
}
static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->fs == fs)
      return;

   draw_flush(llvmpipe->draw);

   llvmpipe->fs = fs;

   llvmpipe->dirty |= LP_NEW_FS;
}
static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_fragment_shader *shader = fs;
   struct lp_fragment_shader_variant *variant;

   assert(fs != llvmpipe->fs);
   (void) llvmpipe;

   /* free all the variants, including their JITed machine code */
   variant = shader->variants;
   while(variant) {
      struct lp_fragment_shader_variant *next = variant->next;

      if(variant->function) {
         if(variant->jit_function)
            LLVMFreeMachineCodeForFunction(screen->engine,
                                           variant->function);
         LLVMDeleteFunction(variant->function);
      }

      FREE(variant);

      variant = next;
   }

   FREE((void *) shader->base.tokens);
   FREE(shader);
}
static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             const struct pipe_constant_buffer *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct pipe_buffer *buffer = constants ? constants->buffer : NULL;
   unsigned size = buffer ? buffer->size : 0;
   const void *data = buffer ? llvmpipe_buffer(buffer)->data : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index == 0);

   if(llvmpipe->constants[shader].buffer == buffer)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_buffer_reference(&llvmpipe->constants[shader].buffer, buffer);

   if(shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, PIPE_SHADER_VERTEX,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}
/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   memset(key, 0, sizeof *key);

   if(lp->framebuffer.zsbuf &&
      lp->depth_stencil->depth.enabled) {
      key->zsbuf_format = lp->framebuffer.zsbuf->format;
      memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
   }

   key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   if(key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;

   if (lp->framebuffer.nr_cbufs) {
      memcpy(&key->blend, lp->blend, sizeof key->blend);
   }

   key->nr_cbufs = lp->framebuffer.nr_cbufs;
   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      const struct util_format_description *format_desc;
      unsigned chan;

      format_desc = util_format_description(lp->framebuffer.cbufs[i]->format);
      assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
             format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

      /* mask out color channels not present in the color buffer.
       * Should be simple to incorporate per-cbuf writemasks:
       */
      for(chan = 0; chan < 4; ++chan) {
         enum util_format_swizzle swizzle = format_desc->swizzle[chan];

         if(swizzle <= UTIL_FORMAT_SWIZZLE_W)
            key->cbuf_blend[i].colormask |= (1 << chan);
      }
   }

   for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
      if(shader->info.file_mask[TGSI_FILE_SAMPLER] & (1 << i))
         lp_sampler_static_state(&key->sampler[i], lp->texture[i], lp->sampler[i]);
}
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant;

   make_variant_key(lp, shader, &key);

   /* look for an existing variant with this key */
   variant = shader->variants;
   while(variant) {
      if(memcmp(&variant->key, &key, sizeof key) == 0)
         break;

      variant = variant->next;
   }

   if(!variant)
      variant = generate_variant(lp, shader, &key);

   shader->current = variant;

   lp_setup_set_fs_function(lp->setup,
                            shader->current->jit_function);
}