llvmpipe: fix stencil only formats.
[mesa.git] / src / gallium / drivers / llvmpipe / lp_state_fs.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * Code generate the whole fragment pipeline.
32 *
33 * The fragment pipeline consists of the following stages:
34 * - early depth test
35 * - fragment shader
36 * - alpha test
37 * - depth/stencil test
38 * - blending
39 *
40 * This file has only the glue to assemble the fragment pipeline. The actual
41 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
42 * lp_bld_*.[ch] files, and in a completely generic and reusable way. Here we
43 * muster the LLVM JIT execution engine to create a function that follows an
44 * established binary interface and that can be called from C directly.
45 *
46 * A big source of complexity here is that we often want to run different
47 * stages with different data types and precisions. For example,
48 * the fragment shader typically needs to be done in floats, but the
49 * depth/stencil test and blending are better done in the types that most closely
50 * match the depth/stencil and color buffers respectively.
51 *
52 * Since the width of a SIMD vector register stays the same regardless of the
53 * element type, different types imply different numbers of elements, so we must
54 * code generate more instances of the stages with larger types to be able to
55 * feed/consume the stages with smaller types.
56 *
57 * @author Jose Fonseca <jfonseca@vmware.com>
58 */
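
/*
 * Illustrative example of the type mismatch described above (numbers chosen
 * for illustration only): with 256-bit SIMD registers the fragment shader
 * works on 8 x f32 vectors, while an unorm8 color buffer is best blended as
 * 32 x u8 vectors, so four instances of the 8-wide float stages must be
 * generated to feed a single 32-wide blend stage.
 */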
59
60 #include <limits.h>
61 #include "pipe/p_defines.h"
62 #include "util/u_inlines.h"
63 #include "util/u_memory.h"
64 #include "util/u_pointer.h"
65 #include "util/format/u_format.h"
66 #include "util/u_dump.h"
67 #include "util/u_string.h"
68 #include "util/simple_list.h"
69 #include "util/u_dual_blend.h"
70 #include "util/os_time.h"
71 #include "pipe/p_shader_tokens.h"
72 #include "draw/draw_context.h"
73 #include "tgsi/tgsi_dump.h"
74 #include "tgsi/tgsi_scan.h"
75 #include "tgsi/tgsi_parse.h"
76 #include "gallivm/lp_bld_type.h"
77 #include "gallivm/lp_bld_const.h"
78 #include "gallivm/lp_bld_conv.h"
79 #include "gallivm/lp_bld_init.h"
80 #include "gallivm/lp_bld_intr.h"
81 #include "gallivm/lp_bld_logic.h"
82 #include "gallivm/lp_bld_tgsi.h"
83 #include "gallivm/lp_bld_nir.h"
84 #include "gallivm/lp_bld_swizzle.h"
85 #include "gallivm/lp_bld_flow.h"
86 #include "gallivm/lp_bld_debug.h"
87 #include "gallivm/lp_bld_arit.h"
88 #include "gallivm/lp_bld_bitarit.h"
89 #include "gallivm/lp_bld_pack.h"
90 #include "gallivm/lp_bld_format.h"
91 #include "gallivm/lp_bld_quad.h"
92
93 #include "lp_bld_alpha.h"
94 #include "lp_bld_blend.h"
95 #include "lp_bld_depth.h"
96 #include "lp_bld_interp.h"
97 #include "lp_context.h"
98 #include "lp_debug.h"
99 #include "lp_perf.h"
100 #include "lp_setup.h"
101 #include "lp_state.h"
102 #include "lp_tex_sample.h"
103 #include "lp_flush.h"
104 #include "lp_state_fs.h"
105 #include "lp_rast.h"
106 #include "nir/nir_to_tgsi_info.h"
107
108 #include "lp_screen.h"
109 #include "compiler/nir/nir_serialize.h"
110 #include "util/mesa-sha1.h"
111 /** Fragment shader number (for debugging) */
112 static unsigned fs_no = 0;
113
114
115 /**
116 * Expand the relevant bits of mask_input into an n*4-dword mask for the
117 * n*4 pixels in n 2x2 quads. This will set the n*4 elements of the
118 * quad mask vector to 0 or ~0.
119 * Grouping is 01, 23 in 2-quad mode, hence only 0 and 2 are valid
120 * first_quad arguments with fs length 8.
121 *
122 * \param first_quad which quad(s) of the quad group to test, in [0,3]
123 * \param mask_input bitwise mask for the whole 4x4 stamp
124 */
125 static LLVMValueRef
126 generate_quad_mask(struct gallivm_state *gallivm,
127 struct lp_type fs_type,
128 unsigned first_quad,
129 unsigned sample,
130 LLVMValueRef mask_input) /* int64 */
131 {
132 LLVMBuilderRef builder = gallivm->builder;
133 struct lp_type mask_type;
134 LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
135 LLVMValueRef bits[16];
136 LLVMValueRef mask, bits_vec;
137 int shift, i;
138
139 /*
140 * XXX: We'll need a different path for 16 x u8
141 */
142 assert(fs_type.width == 32);
143 assert(fs_type.length <= ARRAY_SIZE(bits));
144 mask_type = lp_int_type(fs_type);
145
146 /*
147 * mask_input >>= (quad * 4)
148 */
149 switch (first_quad) {
150 case 0:
151 shift = 0;
152 break;
153 case 1:
154 assert(fs_type.length == 4);
155 shift = 2;
156 break;
157 case 2:
158 shift = 8;
159 break;
160 case 3:
161 assert(fs_type.length == 4);
162 shift = 10;
163 break;
164 default:
165 assert(0);
166 shift = 0;
167 }
168
169 mask_input = LLVMBuildLShr(builder, mask_input, lp_build_const_int64(gallivm, 16 * sample), "");
170 mask_input = LLVMBuildTrunc(builder, mask_input,
171 i32t, "");
172 mask_input = LLVMBuildAnd(builder, mask_input, lp_build_const_int32(gallivm, 0xffff), "");
173
174 mask_input = LLVMBuildLShr(builder,
175 mask_input,
176 LLVMConstInt(i32t, shift, 0),
177 "");
178
179 /*
180 * mask = { mask_input & (1 << i), for i in [0,3] }
181 */
182 mask = lp_build_broadcast(gallivm,
183 lp_build_vec_type(gallivm, mask_type),
184 mask_input);
185
186 for (i = 0; i < fs_type.length / 4; i++) {
187 unsigned j = 2 * (i % 2) + (i / 2) * 8;
188 bits[4*i + 0] = LLVMConstInt(i32t, 1ULL << (j + 0), 0);
189 bits[4*i + 1] = LLVMConstInt(i32t, 1ULL << (j + 1), 0);
190 bits[4*i + 2] = LLVMConstInt(i32t, 1ULL << (j + 4), 0);
191 bits[4*i + 3] = LLVMConstInt(i32t, 1ULL << (j + 5), 0);
192 }
193 bits_vec = LLVMConstVector(bits, fs_type.length);
194 mask = LLVMBuildAnd(builder, mask, bits_vec, "");
195
196 /*
197 * mask = mask == bits ? ~0 : 0
198 */
199 mask = lp_build_compare(gallivm,
200 mask_type, PIPE_FUNC_EQUAL,
201 mask, bits_vec);
202
203 return mask;
204 }
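
/*
 * For reference, the 16 mask_input bits of one sample cover the 4x4 stamp
 * in row-major order (bit = y*4 + x):
 *
 *     0  1 |  2  3
 *     4  5 |  6  7
 *    ------+------
 *     8  9 | 10 11
 *    12 13 | 14 15
 *
 * so quad 0 owns bits {0,1,4,5}, quad 1 {2,3,6,7}, quad 2 {8,9,12,13} and
 * quad 3 {10,11,14,15}, which is where the shift values and bit constants
 * in generate_quad_mask() above come from.
 */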
205
206
207 #define EARLY_DEPTH_TEST 0x1
208 #define LATE_DEPTH_TEST 0x2
209 #define EARLY_DEPTH_WRITE 0x4
210 #define LATE_DEPTH_WRITE 0x8
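
/*
 * depth_mode in generate_fs_loop() is built from these flags; the
 * combinations actually produced are EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE,
 * EARLY_DEPTH_TEST | LATE_DEPTH_WRITE, LATE_DEPTH_TEST | LATE_DEPTH_WRITE,
 * or 0, with the write bits cleared again when neither depth nor stencil
 * writes are enabled.
 */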
211
212 static int
213 find_output_by_semantic( const struct tgsi_shader_info *info,
214 unsigned semantic,
215 unsigned index )
216 {
217 int i;
218
219 for (i = 0; i < info->num_outputs; i++)
220 if (info->output_semantic_name[i] == semantic &&
221 info->output_semantic_index[i] == index)
222 return i;
223
224 return -1;
225 }
226
227
228 /**
229 * Fetch the specified lp_jit_viewport structure for a given viewport_index.
230 */
231 static LLVMValueRef
232 lp_llvm_viewport(LLVMValueRef context_ptr,
233 struct gallivm_state *gallivm,
234 LLVMValueRef viewport_index)
235 {
236 LLVMBuilderRef builder = gallivm->builder;
237 LLVMValueRef ptr;
238 LLVMValueRef res;
239 struct lp_type viewport_type =
240 lp_type_float_vec(32, 32 * LP_JIT_VIEWPORT_NUM_FIELDS);
241
242 ptr = lp_jit_context_viewports(gallivm, context_ptr);
243 ptr = LLVMBuildPointerCast(builder, ptr,
244 LLVMPointerType(lp_build_vec_type(gallivm, viewport_type), 0), "");
245
246 res = lp_build_pointer_get(builder, ptr, viewport_index);
247
248 return res;
249 }
250
251
252 static LLVMValueRef
253 lp_build_depth_clamp(struct gallivm_state *gallivm,
254 LLVMBuilderRef builder,
255 struct lp_type type,
256 LLVMValueRef context_ptr,
257 LLVMValueRef thread_data_ptr,
258 LLVMValueRef z)
259 {
260 LLVMValueRef viewport, min_depth, max_depth;
261 LLVMValueRef viewport_index;
262 struct lp_build_context f32_bld;
263
264 assert(type.floating);
265 lp_build_context_init(&f32_bld, gallivm, type);
266
267 /*
268 * Assumes clamping of the viewport index will occur in setup/gs. Value
269 * is passed through the rasterization stage via lp_rast_shader_inputs.
270 *
271 * See: draw_clamp_viewport_idx and lp_clamp_viewport_idx for clamping
272 * semantics.
273 */
274 viewport_index = lp_jit_thread_data_raster_state_viewport_index(gallivm,
275 thread_data_ptr);
276
277 /*
278 * Load the min and max depth from the lp_jit_context.viewports
279 * array of lp_jit_viewport structures.
280 */
281 viewport = lp_llvm_viewport(context_ptr, gallivm, viewport_index);
282
283 /* viewports[viewport_index].min_depth */
284 min_depth = LLVMBuildExtractElement(builder, viewport,
285 lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MIN_DEPTH), "");
286 min_depth = lp_build_broadcast_scalar(&f32_bld, min_depth);
287
288 /* viewports[viewport_index].max_depth */
289 max_depth = LLVMBuildExtractElement(builder, viewport,
290 lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MAX_DEPTH), "");
291 max_depth = lp_build_broadcast_scalar(&f32_bld, max_depth);
292
293 /*
294 * Clamp to the min and max depth values for the given viewport.
295 */
296 return lp_build_clamp(&f32_bld, z, min_depth, max_depth);
297 }
298
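/*
 * Per-sample alpha-to-coverage: sample s stays covered if
 * alpha > s / coverage_samples. E.g. with 4 samples and alpha = 0.6 the
 * thresholds are 0.0, 0.25, 0.5, 0.75, so 3 of the 4 samples remain
 * covered, approximating the fragment's alpha as coverage.
 */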
299 static void
300 lp_build_sample_alpha_to_coverage(struct gallivm_state *gallivm,
301 struct lp_type type,
302 unsigned coverage_samples,
303 LLVMValueRef num_loop,
304 LLVMValueRef loop_counter,
305 LLVMValueRef coverage_mask_store,
306 LLVMValueRef alpha)
307 {
308 struct lp_build_context bld;
309 LLVMBuilderRef builder = gallivm->builder;
310 float step = 1.0f / coverage_samples;
311
312 lp_build_context_init(&bld, gallivm, type);
313 for (unsigned s = 0; s < coverage_samples; s++) {
314 LLVMValueRef alpha_ref_value = lp_build_const_vec(gallivm, type, step * s);
315 LLVMValueRef test = lp_build_cmp(&bld, PIPE_FUNC_GREATER, alpha, alpha_ref_value);
316
317 LLVMValueRef s_mask_idx = LLVMBuildMul(builder, lp_build_const_int32(gallivm, s), num_loop, "");
318 s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_counter, "");
319 LLVMValueRef s_mask_ptr = LLVMBuildGEP(builder, coverage_mask_store, &s_mask_idx, 1, "");
320 LLVMValueRef s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
321 s_mask = LLVMBuildAnd(builder, s_mask, test, "");
322 LLVMBuildStore(builder, s_mask, s_mask_ptr);
323 }
324 }
325
326 struct lp_build_fs_llvm_iface {
327 struct lp_build_fs_iface base;
328 struct lp_build_interp_soa_context *interp;
329 struct lp_build_for_loop_state *loop_state;
330 LLVMValueRef mask_store;
331 };
332
333 static LLVMValueRef fs_interp(const struct lp_build_fs_iface *iface,
334 struct lp_build_context *bld,
335 unsigned attrib, unsigned chan,
336 bool centroid, bool sample,
337 LLVMValueRef attrib_indir,
338 LLVMValueRef offsets[2])
339 {
340 struct lp_build_fs_llvm_iface *fs_iface = (struct lp_build_fs_llvm_iface *)iface;
341 struct lp_build_interp_soa_context *interp = fs_iface->interp;
342 unsigned loc = TGSI_INTERPOLATE_LOC_CENTER;
343 if (centroid)
344 loc = TGSI_INTERPOLATE_LOC_CENTROID;
345 if (sample)
346 loc = TGSI_INTERPOLATE_LOC_SAMPLE;
347
348 return lp_build_interp_soa(interp, bld->gallivm, fs_iface->loop_state->counter,
349 fs_iface->mask_store,
350 attrib, chan, loc, attrib_indir, offsets);
351 }
352
353 /**
354 * Generate the fragment shader, depth/stencil test, and alpha tests.
355 */
356 static void
357 generate_fs_loop(struct gallivm_state *gallivm,
358 struct lp_fragment_shader *shader,
359 const struct lp_fragment_shader_variant_key *key,
360 LLVMBuilderRef builder,
361 struct lp_type type,
362 LLVMValueRef context_ptr,
363 LLVMValueRef sample_pos_array,
364 LLVMValueRef num_loop,
365 struct lp_build_interp_soa_context *interp,
366 const struct lp_build_sampler_soa *sampler,
367 const struct lp_build_image_soa *image,
368 LLVMValueRef mask_store,
369 LLVMValueRef (*out_color)[4],
370 LLVMValueRef depth_base_ptr,
371 LLVMValueRef depth_stride,
372 LLVMValueRef depth_sample_stride,
373 LLVMValueRef facing,
374 LLVMValueRef thread_data_ptr)
375 {
376 const struct util_format_description *zs_format_desc = NULL;
377 const struct tgsi_token *tokens = shader->base.tokens;
378 struct lp_type int_type = lp_int_type(type);
379 LLVMTypeRef vec_type, int_vec_type;
380 LLVMValueRef mask_ptr = NULL, mask_val = NULL;
381 LLVMValueRef consts_ptr, num_consts_ptr;
382 LLVMValueRef ssbo_ptr, num_ssbo_ptr;
383 LLVMValueRef z;
384 LLVMValueRef z_value, s_value;
385 LLVMValueRef z_fb, s_fb;
386 LLVMValueRef depth_ptr;
387 LLVMValueRef stencil_refs[2];
388 LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
389 LLVMValueRef zs_samples = lp_build_const_int32(gallivm, key->zsbuf_nr_samples);
390 struct lp_build_for_loop_state loop_state, sample_loop_state;
391 struct lp_build_mask_context mask;
392 /*
393 * TODO: figure out if simple_shader optimization is really worthwhile to
394 * keep. Disabled because it may hide some real bugs in the (depth/stencil)
395 * code since tests tend to take another codepath than real shaders.
396 */
397 boolean simple_shader = (shader->info.base.file_count[TGSI_FILE_SAMPLER] == 0 &&
398 shader->info.base.num_inputs < 3 &&
399 shader->info.base.num_instructions < 8) && 0;
400 const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
401 util_blend_state_is_dual(&key->blend, 0);
402 const bool post_depth_coverage = shader->info.base.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE];
403 unsigned attrib;
404 unsigned chan;
405 unsigned cbuf;
406 unsigned depth_mode;
407
408 struct lp_bld_tgsi_system_values system_values;
409
410 memset(&system_values, 0, sizeof(system_values));
411
412 /* truncate then sign extend. */
413 system_values.front_facing = LLVMBuildTrunc(gallivm->builder, facing, LLVMInt1TypeInContext(gallivm->context), "");
414 system_values.front_facing = LLVMBuildSExt(gallivm->builder, system_values.front_facing, LLVMInt32TypeInContext(gallivm->context), "");
415
416 if (key->depth.enabled ||
417 key->stencil[0].enabled) {
418
419 zs_format_desc = util_format_description(key->zsbuf_format);
420 assert(zs_format_desc);
421
422 if (shader->info.base.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL])
423 depth_mode = EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE;
424 else if (!shader->info.base.writes_z && !shader->info.base.writes_stencil) {
425 if (shader->info.base.writes_memory)
426 depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
427 else if (key->alpha.enabled ||
428 key->blend.alpha_to_coverage ||
429 shader->info.base.uses_kill ||
430 shader->info.base.writes_samplemask) {
431 /* With alpha test and kill, we can do the depth test early
432 * and hopefully eliminate some quads. But we need to do a
433 * special deferred depth write once the final mask value
434 * is known. This only works, though, if there's either no
435 * stencil test or the stencil value isn't written.
436 */
437 if (key->stencil[0].enabled && (key->stencil[0].writemask ||
438 (key->stencil[1].enabled &&
439 key->stencil[1].writemask)))
440 depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
441 else
442 depth_mode = EARLY_DEPTH_TEST | LATE_DEPTH_WRITE;
443 }
444 else
445 depth_mode = EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE;
446 }
447 else {
448 depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
449 }
450
451 if (!(key->depth.enabled && key->depth.writemask) &&
452 !(key->stencil[0].enabled && (key->stencil[0].writemask ||
453 (key->stencil[1].enabled &&
454 key->stencil[1].writemask))))
455 depth_mode &= ~(LATE_DEPTH_WRITE | EARLY_DEPTH_WRITE);
456 }
457 else {
458 depth_mode = 0;
459 }
460
461 vec_type = lp_build_vec_type(gallivm, type);
462 int_vec_type = lp_build_vec_type(gallivm, int_type);
463
464 stencil_refs[0] = lp_jit_context_stencil_ref_front_value(gallivm, context_ptr);
465 stencil_refs[1] = lp_jit_context_stencil_ref_back_value(gallivm, context_ptr);
466 /* convert scalar stencil refs into vectors */
467 stencil_refs[0] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[0]);
468 stencil_refs[1] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[1]);
469
470 consts_ptr = lp_jit_context_constants(gallivm, context_ptr);
471 num_consts_ptr = lp_jit_context_num_constants(gallivm, context_ptr);
472
473 ssbo_ptr = lp_jit_context_ssbos(gallivm, context_ptr);
474 num_ssbo_ptr = lp_jit_context_num_ssbos(gallivm, context_ptr);
475
476 memset(outputs, 0, sizeof outputs);
477
478 /* Allocate color storage for each fragment sample */
479 LLVMValueRef color_store_size = num_loop;
480 if (key->min_samples > 1)
481 color_store_size = LLVMBuildMul(builder, num_loop, lp_build_const_int32(gallivm, key->min_samples), "");
482
483 for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
484 for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
485 out_color[cbuf][chan] = lp_build_array_alloca(gallivm,
486 lp_build_vec_type(gallivm,
487 type),
488 color_store_size, "color");
489 }
490 }
491 if (dual_source_blend) {
492 assert(key->nr_cbufs <= 1);
493 for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
494 out_color[1][chan] = lp_build_array_alloca(gallivm,
495 lp_build_vec_type(gallivm,
496 type),
497 color_store_size, "color1");
498 }
499 }
500
501 lp_build_for_loop_begin(&loop_state, gallivm,
502 lp_build_const_int32(gallivm, 0),
503 LLVMIntULT,
504 num_loop,
505 lp_build_const_int32(gallivm, 1));
506
507 LLVMValueRef sample_mask_in;
508 if (key->multisample) {
509 sample_mask_in = lp_build_const_int_vec(gallivm, type, 0);
510 /* create shader execution mask by combining all sample masks. */
511 for (unsigned s = 0; s < key->coverage_samples; s++) {
512 LLVMValueRef s_mask_idx = LLVMBuildMul(builder, num_loop, lp_build_const_int32(gallivm, s), "");
513 s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
514 LLVMValueRef s_mask = lp_build_pointer_get(builder, mask_store, s_mask_idx);
515 if (s == 0)
516 mask_val = s_mask;
517 else
518 mask_val = LLVMBuildOr(builder, s_mask, mask_val, "");
519
520 LLVMValueRef mask_in = LLVMBuildAnd(builder, s_mask, lp_build_const_int_vec(gallivm, type, (1 << s)), "");
521 sample_mask_in = LLVMBuildOr(builder, sample_mask_in, mask_in, "");
522 }
523 } else {
524 sample_mask_in = lp_build_const_int_vec(gallivm, type, 1);
525 mask_ptr = LLVMBuildGEP(builder, mask_store,
526 &loop_state.counter, 1, "mask_ptr");
527 mask_val = LLVMBuildLoad(builder, mask_ptr, "");
528
529 LLVMValueRef mask_in = LLVMBuildAnd(builder, mask_val, lp_build_const_int_vec(gallivm, type, 1), "");
530 sample_mask_in = LLVMBuildOr(builder, sample_mask_in, mask_in, "");
531 }
532
533 /* 'mask' will control execution based on quad's pixel alive/killed state */
534 lp_build_mask_begin(&mask, gallivm, type, mask_val);
535
536 if (!(depth_mode & EARLY_DEPTH_TEST) && !simple_shader)
537 lp_build_mask_check(&mask);
538
539 /* Create storage for recombining sample masks after early Z pass. */
540 LLVMValueRef s_mask_or = lp_build_alloca(gallivm, lp_build_int_vec_type(gallivm, type), "cov_mask_early_depth");
541 LLVMBuildStore(builder, LLVMConstNull(lp_build_int_vec_type(gallivm, type)), s_mask_or);
542
543 /* Create storage for post depth sample mask */
544 LLVMValueRef post_depth_sample_mask_in = NULL;
545 if (post_depth_coverage)
546 post_depth_sample_mask_in = lp_build_alloca(gallivm, int_vec_type, "post_depth_sample_mask_in");
547
548 LLVMValueRef s_mask = NULL, s_mask_ptr = NULL;
549 LLVMValueRef z_sample_value_store = NULL, s_sample_value_store = NULL;
550 LLVMValueRef z_fb_store = NULL, s_fb_store = NULL;
551 LLVMTypeRef z_type = NULL, z_fb_type = NULL;
552
553 /* Run early depth once per sample */
554 if (key->multisample) {
555
556 if (zs_format_desc) {
557 struct lp_type zs_type = lp_depth_type(zs_format_desc, type.length);
558 struct lp_type z_type = zs_type;
559 struct lp_type s_type = zs_type;
560 if (zs_format_desc->block.bits < type.width)
561 z_type.width = type.width;
562 if (zs_format_desc->block.bits == 8)
563 s_type.width = type.width;
564
565 else if (zs_format_desc->block.bits > 32) {
566 z_type.width = z_type.width / 2;
567 s_type.width = s_type.width / 2;
568 s_type.floating = 0;
569 }
570 z_sample_value_store = lp_build_array_alloca(gallivm, lp_build_int_vec_type(gallivm, type),
571 zs_samples, "z_sample_store");
572 s_sample_value_store = lp_build_array_alloca(gallivm, lp_build_int_vec_type(gallivm, type),
573 zs_samples, "s_sample_store");
574 z_fb_store = lp_build_array_alloca(gallivm, lp_build_vec_type(gallivm, z_type),
575 zs_samples, "z_fb_store");
576 s_fb_store = lp_build_array_alloca(gallivm, lp_build_vec_type(gallivm, s_type),
577 zs_samples, "s_fb_store");
578 }
579 lp_build_for_loop_begin(&sample_loop_state, gallivm,
580 lp_build_const_int32(gallivm, 0),
581 LLVMIntULT, lp_build_const_int32(gallivm, key->coverage_samples),
582 lp_build_const_int32(gallivm, 1));
583
584 LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
585 s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
586 s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");
587
588 s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
589 s_mask = LLVMBuildAnd(builder, s_mask, mask_val, "");
590 }
591
592
593 /* for multisample, Z needs to be interpolated at sample points for testing. */
594 lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, key->multisample ? sample_loop_state.counter : NULL);
595 z = interp->pos[2];
596
597 depth_ptr = depth_base_ptr;
598 if (key->multisample) {
599 LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_loop_state.counter, depth_sample_stride, "");
600 depth_ptr = LLVMBuildGEP(builder, depth_ptr, &sample_offset, 1, "");
601 }
602
603 if (depth_mode & EARLY_DEPTH_TEST) {
604 /*
605 * Clamp according to ARB_depth_clamp semantics.
606 */
607 if (key->depth_clamp) {
608 z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
609 thread_data_ptr, z);
610 }
611 lp_build_depth_stencil_load_swizzled(gallivm, type,
612 zs_format_desc, key->resource_1d,
613 depth_ptr, depth_stride,
614 &z_fb, &s_fb, loop_state.counter);
615 lp_build_depth_stencil_test(gallivm,
616 &key->depth,
617 key->stencil,
618 type,
619 zs_format_desc,
620 key->multisample ? NULL : &mask,
621 &s_mask,
622 stencil_refs,
623 z, z_fb, s_fb,
624 facing,
625 &z_value, &s_value,
626 !simple_shader && !key->multisample);
627
628 if (depth_mode & EARLY_DEPTH_WRITE) {
629 lp_build_depth_stencil_write_swizzled(gallivm, type,
630 zs_format_desc, key->resource_1d,
631 NULL, NULL, NULL, loop_state.counter,
632 depth_ptr, depth_stride,
633 z_value, s_value);
634 }
635 /*
636 * Note that if stencil is enabled, the mask check must come after the ds
637 * write, not after the stencil test; otherwise new stencil values may not
638 * get written if all fragments were killed by the depth/stencil test.
639 */
640 if (!simple_shader && key->stencil[0].enabled && !key->multisample)
641 lp_build_mask_check(&mask);
642
643 if (key->multisample) {
644 z_fb_type = LLVMTypeOf(z_fb);
645 z_type = LLVMTypeOf(z_value);
646 lp_build_pointer_set(builder, z_sample_value_store, sample_loop_state.counter, LLVMBuildBitCast(builder, z_value, lp_build_int_vec_type(gallivm, type), ""));
647 lp_build_pointer_set(builder, s_sample_value_store, sample_loop_state.counter, LLVMBuildBitCast(builder, s_value, lp_build_int_vec_type(gallivm, type), ""));
648 lp_build_pointer_set(builder, z_fb_store, sample_loop_state.counter, z_fb);
649 lp_build_pointer_set(builder, s_fb_store, sample_loop_state.counter, s_fb);
650 }
651 }
652
653 if (key->multisample) {
654 /*
655 * Store the post-early Z coverage mask.
656 * Recombine the resulting coverage masks post early Z into the fragment
657 * shader execution mask.
658 */
659 LLVMValueRef tmp_s_mask_or = LLVMBuildLoad(builder, s_mask_or, "");
660 tmp_s_mask_or = LLVMBuildOr(builder, tmp_s_mask_or, s_mask, "");
661 LLVMBuildStore(builder, tmp_s_mask_or, s_mask_or);
662
663 if (post_depth_coverage) {
664 LLVMValueRef mask_bit_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
665 LLVMValueRef post_depth_mask_in = LLVMBuildLoad(builder, post_depth_sample_mask_in, "");
666 mask_bit_idx = LLVMBuildAnd(builder, s_mask, lp_build_broadcast(gallivm, int_vec_type, mask_bit_idx), "");
667 post_depth_mask_in = LLVMBuildOr(builder, post_depth_mask_in, mask_bit_idx, "");
668 LLVMBuildStore(builder, post_depth_mask_in, post_depth_sample_mask_in);
669 }
670
671 LLVMBuildStore(builder, s_mask, s_mask_ptr);
672
673 lp_build_for_loop_end(&sample_loop_state);
674
675 /* recombine all the coverage masks into the shader exec mask. */
676 tmp_s_mask_or = LLVMBuildLoad(builder, s_mask_or, "");
677 lp_build_mask_update(&mask, tmp_s_mask_or);
678
679 if (key->min_samples == 1) {
680 /* for multisample, Z needs to be re-interpolated at the pixel center */
681 lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, NULL);
682 lp_build_mask_update(&mask, tmp_s_mask_or);
683 }
684 } else {
685 if (post_depth_coverage) {
686 LLVMValueRef post_depth_mask_in = LLVMBuildAnd(builder, lp_build_mask_value(&mask), lp_build_const_int_vec(gallivm, type, 1), "");
687 LLVMBuildStore(builder, post_depth_mask_in, post_depth_sample_mask_in);
688 }
689 }
690
691 LLVMValueRef out_sample_mask_storage = NULL;
692 if (shader->info.base.writes_samplemask) {
693 out_sample_mask_storage = lp_build_alloca(gallivm, int_vec_type, "write_mask");
694 if (key->min_samples > 1)
695 LLVMBuildStore(builder, LLVMConstNull(int_vec_type), out_sample_mask_storage);
696 }
697
698 if (post_depth_coverage) {
699 system_values.sample_mask_in = LLVMBuildLoad(builder, post_depth_sample_mask_in, "");
700 }
701 else
702 system_values.sample_mask_in = sample_mask_in;
703 if (key->multisample && key->min_samples > 1) {
704 lp_build_for_loop_begin(&sample_loop_state, gallivm,
705 lp_build_const_int32(gallivm, 0),
706 LLVMIntULT,
707 lp_build_const_int32(gallivm, key->min_samples),
708 lp_build_const_int32(gallivm, 1));
709
710 LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
711 s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
712 s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");
713 s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
714 lp_build_mask_force(&mask, s_mask);
715 lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, sample_loop_state.counter);
716 system_values.sample_id = sample_loop_state.counter;
717 system_values.sample_mask_in = LLVMBuildAnd(builder, system_values.sample_mask_in,
718 lp_build_broadcast(gallivm, int_vec_type,
719 LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "")), "");
720 } else {
721 system_values.sample_id = lp_build_const_int32(gallivm, 0);
722
723 }
724 system_values.sample_pos = sample_pos_array;
725
726 lp_build_interp_soa_update_inputs_dyn(interp, gallivm, loop_state.counter, mask_store, sample_loop_state.counter);
727
728 struct lp_build_fs_llvm_iface fs_iface = {
729 .base.interp_fn = fs_interp,
730 .interp = interp,
731 .loop_state = &loop_state,
732 .mask_store = mask_store,
733 };
734
735 struct lp_build_tgsi_params params;
736 memset(&params, 0, sizeof(params));
737
738 params.type = type;
739 params.mask = &mask;
740 params.fs_iface = &fs_iface.base;
741 params.consts_ptr = consts_ptr;
742 params.const_sizes_ptr = num_consts_ptr;
743 params.system_values = &system_values;
744 params.inputs = interp->inputs;
745 params.context_ptr = context_ptr;
746 params.thread_data_ptr = thread_data_ptr;
747 params.sampler = sampler;
748 params.info = &shader->info.base;
749 params.ssbo_ptr = ssbo_ptr;
750 params.ssbo_sizes_ptr = num_ssbo_ptr;
751 params.image = image;
752
753 /* Build the actual shader */
754 if (shader->base.type == PIPE_SHADER_IR_TGSI)
755 lp_build_tgsi_soa(gallivm, tokens, &params,
756 outputs);
757 else
758 lp_build_nir_soa(gallivm, shader->base.ir.nir, &params,
759 outputs);
760
761 /* Alpha test */
762 if (key->alpha.enabled) {
763 int color0 = find_output_by_semantic(&shader->info.base,
764 TGSI_SEMANTIC_COLOR,
765 0);
766
767 if (color0 != -1 && outputs[color0][3]) {
768 const struct util_format_description *cbuf_format_desc;
769 LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");
770 LLVMValueRef alpha_ref_value;
771
772 alpha_ref_value = lp_jit_context_alpha_ref_value(gallivm, context_ptr);
773 alpha_ref_value = lp_build_broadcast(gallivm, vec_type, alpha_ref_value);
774
775 cbuf_format_desc = util_format_description(key->cbuf_format[0]);
776
777 lp_build_alpha_test(gallivm, key->alpha.func, type, cbuf_format_desc,
778 &mask, alpha, alpha_ref_value,
779 (depth_mode & LATE_DEPTH_TEST) != 0);
780 }
781 }
782
783 /* Emulate Alpha to Coverage with Alpha test */
784 if (key->blend.alpha_to_coverage) {
785 int color0 = find_output_by_semantic(&shader->info.base,
786 TGSI_SEMANTIC_COLOR,
787 0);
788
789 if (color0 != -1 && outputs[color0][3]) {
790 LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");
791
792 if (!key->multisample) {
793 lp_build_alpha_to_coverage(gallivm, type,
794 &mask, alpha,
795 (depth_mode & LATE_DEPTH_TEST) != 0);
796 } else {
797 lp_build_sample_alpha_to_coverage(gallivm, type, key->coverage_samples, num_loop,
798 loop_state.counter,
799 mask_store, alpha);
800 }
801 }
802 }
803 if (key->blend.alpha_to_one && key->multisample) {
804 for (attrib = 0; attrib < shader->info.base.num_outputs; ++attrib) {
805 unsigned cbuf = shader->info.base.output_semantic_index[attrib];
806 if ((shader->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_COLOR) &&
807 ((cbuf < key->nr_cbufs) || (cbuf == 1 && dual_source_blend)))
808 if (outputs[cbuf][3]) {
809 LLVMBuildStore(builder, lp_build_const_vec(gallivm, type, 1.0), outputs[cbuf][3]);
810 }
811 }
812 }
813 if (shader->info.base.writes_samplemask) {
814 LLVMValueRef output_smask = NULL;
815 int smaski = find_output_by_semantic(&shader->info.base,
816 TGSI_SEMANTIC_SAMPLEMASK,
817 0);
818 struct lp_build_context smask_bld;
819 lp_build_context_init(&smask_bld, gallivm, int_type);
820
821 assert(smaski >= 0);
822 output_smask = LLVMBuildLoad(builder, outputs[smaski][0], "smask");
823 output_smask = LLVMBuildBitCast(builder, output_smask, smask_bld.vec_type, "");
824
825 if (key->min_samples > 1) {
826 /* only the bit corresponding to this sample is to be used. */
827 LLVMValueRef tmp_mask = LLVMBuildLoad(builder, out_sample_mask_storage, "tmp_mask");
828 LLVMValueRef out_smask_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
829 LLVMValueRef smask_bit = LLVMBuildAnd(builder, output_smask, lp_build_broadcast(gallivm, int_vec_type, out_smask_idx), "");
830 output_smask = LLVMBuildOr(builder, tmp_mask, smask_bit, "");
831 }
832
833 LLVMBuildStore(builder, output_smask, out_sample_mask_storage);
834 }
835
836 /* Color write - per fragment sample */
837 for (attrib = 0; attrib < shader->info.base.num_outputs; ++attrib)
838 {
839 unsigned cbuf = shader->info.base.output_semantic_index[attrib];
840 if ((shader->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_COLOR) &&
841 ((cbuf < key->nr_cbufs) || (cbuf == 1 && dual_source_blend)))
842 {
843 for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
844 if(outputs[attrib][chan]) {
845 /* XXX: just initialize outputs to point at colors[] and
846 * skip this.
847 */
848 LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
849 LLVMValueRef color_ptr;
850 LLVMValueRef color_idx = loop_state.counter;
851 if (key->min_samples > 1)
852 color_idx = LLVMBuildAdd(builder, color_idx,
853 LLVMBuildMul(builder, sample_loop_state.counter, num_loop, ""), "");
854 color_ptr = LLVMBuildGEP(builder, out_color[cbuf][chan],
855 &color_idx, 1, "");
856 lp_build_name(out, "color%u.%c", attrib, "rgba"[chan]);
857 LLVMBuildStore(builder, out, color_ptr);
858 }
859 }
860 }
861 }
862
863 if (key->multisample && key->min_samples > 1) {
864 LLVMBuildStore(builder, lp_build_mask_value(&mask), s_mask_ptr);
865 lp_build_for_loop_end(&sample_loop_state);
866 }
867
868 if (key->multisample) {
869 /* execute depth test for each sample */
870 lp_build_for_loop_begin(&sample_loop_state, gallivm,
871 lp_build_const_int32(gallivm, 0),
872 LLVMIntULT, lp_build_const_int32(gallivm, key->coverage_samples),
873 lp_build_const_int32(gallivm, 1));
874
875 /* load the per-sample coverage mask */
876 LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
877 s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
878 s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");
879
880 /* combine the execution mask post fragment shader with the coverage mask. */
881 s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
882 if (key->min_samples == 1)
883 s_mask = LLVMBuildAnd(builder, s_mask, lp_build_mask_value(&mask), "");
884
885 /* if the shader writes sample mask use that */
886 if (shader->info.base.writes_samplemask) {
887 LLVMValueRef out_smask_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
888 out_smask_idx = lp_build_broadcast(gallivm, int_vec_type, out_smask_idx);
889 LLVMValueRef output_smask = LLVMBuildLoad(builder, out_sample_mask_storage, "");
890 LLVMValueRef smask_bit = LLVMBuildAnd(builder, output_smask, out_smask_idx, "");
891 LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntNE, smask_bit, lp_build_const_int_vec(gallivm, int_type, 0), "");
892 smask_bit = LLVMBuildSExt(builder, cmp, int_vec_type, "");
893
894 s_mask = LLVMBuildAnd(builder, s_mask, smask_bit, "");
895 }
896 }
897
898 depth_ptr = depth_base_ptr;
899 if (key->multisample) {
900 LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_loop_state.counter, depth_sample_stride, "");
901 depth_ptr = LLVMBuildGEP(builder, depth_ptr, &sample_offset, 1, "");
902 }
903
904 /* Late Z test */
905 if (depth_mode & LATE_DEPTH_TEST) {
906 int pos0 = find_output_by_semantic(&shader->info.base,
907 TGSI_SEMANTIC_POSITION,
908 0);
909 int s_out = find_output_by_semantic(&shader->info.base,
910 TGSI_SEMANTIC_STENCIL,
911 0);
912 if (pos0 != -1 && outputs[pos0][2]) {
913 z = LLVMBuildLoad(builder, outputs[pos0][2], "output.z");
914 }
915 /*
916 * Clamp according to ARB_depth_clamp semantics.
917 */
918 if (key->depth_clamp) {
919 z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
920 thread_data_ptr, z);
921 }
922
923 if (s_out != -1 && outputs[s_out][1]) {
924 /* there's only one value, and spec says to discard additional bits */
925 LLVMValueRef s_max_mask = lp_build_const_int_vec(gallivm, int_type, 255);
926 stencil_refs[0] = LLVMBuildLoad(builder, outputs[s_out][1], "output.s");
927 stencil_refs[0] = LLVMBuildBitCast(builder, stencil_refs[0], int_vec_type, "");
928 stencil_refs[0] = LLVMBuildAnd(builder, stencil_refs[0], s_max_mask, "");
929 stencil_refs[1] = stencil_refs[0];
930 }
931
932 lp_build_depth_stencil_load_swizzled(gallivm, type,
933 zs_format_desc, key->resource_1d,
934 depth_ptr, depth_stride,
935 &z_fb, &s_fb, loop_state.counter);
936
937 lp_build_depth_stencil_test(gallivm,
938 &key->depth,
939 key->stencil,
940 type,
941 zs_format_desc,
942 key->multisample ? NULL : &mask,
943 &s_mask,
944 stencil_refs,
945 z, z_fb, s_fb,
946 facing,
947 &z_value, &s_value,
948 !simple_shader);
949 /* Late Z write */
950 if (depth_mode & LATE_DEPTH_WRITE) {
951 lp_build_depth_stencil_write_swizzled(gallivm, type,
952 zs_format_desc, key->resource_1d,
953 NULL, NULL, NULL, loop_state.counter,
954 depth_ptr, depth_stride,
955 z_value, s_value);
956 }
957 }
958 else if ((depth_mode & EARLY_DEPTH_TEST) &&
959 (depth_mode & LATE_DEPTH_WRITE))
960 {
961 /* Need to apply a reduced mask to the depth write. Reload the
962 * depth value, update it from z_value/s_value with the new mask value and
963 * write that out.
964 */
965 if (key->multisample) {
966 z_value = LLVMBuildBitCast(builder, lp_build_pointer_get(builder, z_sample_value_store, sample_loop_state.counter), z_type, "");
967 s_value = lp_build_pointer_get(builder, s_sample_value_store, sample_loop_state.counter);
968 z_fb = LLVMBuildBitCast(builder, lp_build_pointer_get(builder, z_fb_store, sample_loop_state.counter), z_fb_type, "");
969 s_fb = lp_build_pointer_get(builder, s_fb_store, sample_loop_state.counter);
970 }
971 lp_build_depth_stencil_write_swizzled(gallivm, type,
972 zs_format_desc, key->resource_1d,
973 key->multisample ? s_mask : lp_build_mask_value(&mask), z_fb, s_fb, loop_state.counter,
974 depth_ptr, depth_stride,
975 z_value, s_value);
976 }
977
978 if (key->occlusion_count) {
979 LLVMValueRef counter = lp_jit_thread_data_counter(gallivm, thread_data_ptr);
980 lp_build_name(counter, "counter");
981
982 lp_build_occlusion_count(gallivm, type,
983 key->multisample ? s_mask : lp_build_mask_value(&mask), counter);
984 }
985
986 if (key->multisample) {
987 /* store the sample mask for this loop */
988 LLVMBuildStore(builder, s_mask, s_mask_ptr);
989 lp_build_for_loop_end(&sample_loop_state);
990 }
991
992 mask_val = lp_build_mask_end(&mask);
993 if (!key->multisample)
994 LLVMBuildStore(builder, mask_val, mask_ptr);
995 lp_build_for_loop_end(&loop_state);
996 }
997
998
999 /**
1000 * This function will reorder pixels from the fragment shader SoA to memory layout AoS
1001 *
1002 * Fragment Shader outputs pixels in small 2x2 blocks
1003 * e.g. (0, 0), (1, 0), (0, 1), (1, 1) ; (2, 0) ...
1004 *
1005 * However, in memory, pixels are stored in rows
1006 * e.g. (0, 0), (1, 0), (2, 0), (3, 0) ; (0, 1) ...
1007 *
1008 * @param type fragment shader type (4x or 8x float)
1009 * @param num_fs number of fs_src
1010 * @param is_1d whether we're outputting to a 1d resource
1011 * @param dst_channels number of output channels
1012 * @param fs_src output from fragment shader
1013 * @param dst pointer to store result
1014 * @param pad_inline is channel padding inline or at end of row
1015 * @return the number of dsts
1016 */
1017 static int
1018 generate_fs_twiddle(struct gallivm_state *gallivm,
1019 struct lp_type type,
1020 unsigned num_fs,
1021 unsigned dst_channels,
1022 LLVMValueRef fs_src[][4],
1023 LLVMValueRef* dst,
1024 bool pad_inline)
1025 {
1026 LLVMValueRef src[16];
1027
1028 bool swizzle_pad;
1029 bool twiddle;
1030 bool split;
1031
1032 unsigned pixels = type.length / 4;
1033 unsigned reorder_group;
1034 unsigned src_channels;
1035 unsigned src_count;
1036 unsigned i;
1037
1038 src_channels = dst_channels < 3 ? dst_channels : 4;
1039 src_count = num_fs * src_channels;
1040
1041 assert(pixels == 2 || pixels == 1);
1042 assert(num_fs * src_channels <= ARRAY_SIZE(src));
1043
1044 /*
1045 * Transpose from SoA -> AoS
1046 */
1047 for (i = 0; i < num_fs; ++i) {
1048 lp_build_transpose_aos_n(gallivm, type, &fs_src[i][0], src_channels, &src[i * src_channels]);
1049 }
1050
1051 /*
1052 * Pick transformation options
1053 */
1054 swizzle_pad = false;
1055 twiddle = false;
1056 split = false;
1057 reorder_group = 0;
1058
1059 if (dst_channels == 1) {
1060 twiddle = true;
1061
1062 if (pixels == 2) {
1063 split = true;
1064 }
1065 } else if (dst_channels == 2) {
1066 if (pixels == 1) {
1067 reorder_group = 1;
1068 }
1069 } else if (dst_channels > 2) {
1070 if (pixels == 1) {
1071 reorder_group = 2;
1072 } else {
1073 twiddle = true;
1074 }
1075
1076 if (!pad_inline && dst_channels == 3 && pixels > 1) {
1077 swizzle_pad = true;
1078 }
1079 }
1080
1081 /*
1082 * Split the src in half
1083 */
1084 if (split) {
1085 for (i = num_fs; i > 0; --i) {
1086 src[(i - 1)*2 + 1] = lp_build_extract_range(gallivm, src[i - 1], 4, 4);
1087 src[(i - 1)*2 + 0] = lp_build_extract_range(gallivm, src[i - 1], 0, 4);
1088 }
1089
1090 src_count *= 2;
1091 type.length = 4;
1092 }
1093
1094 /*
1095 * Ensure pixels are in memory order
1096 */
1097 if (reorder_group) {
1098 /* Twiddle pixels by reordering the array, e.g.:
1099 *
1100 * src_count = 8 -> 0 2 1 3 4 6 5 7
1101 * src_count = 16 -> 0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
1102 */
1103 const unsigned reorder_sw[] = { 0, 2, 1, 3 };
1104
1105 for (i = 0; i < src_count; ++i) {
1106 unsigned group = i / reorder_group;
1107 unsigned block = (group / 4) * 4 * reorder_group;
1108 unsigned j = block + (reorder_sw[group % 4] * reorder_group) + (i % reorder_group);
1109 dst[i] = src[j];
1110 }
1111 } else if (twiddle) {
1112 /* Twiddle pixels across elements of array */
1113 /*
1114 * XXX: we should avoid this in some cases, but would need to tell
1115 * lp_build_conv to reorder (or deal with it ourselves).
1116 */
1117 lp_bld_quad_twiddle(gallivm, type, src, src_count, dst);
1118 } else {
1119 /* Do nothing */
1120 memcpy(dst, src, sizeof(LLVMValueRef) * src_count);
1121 }
1122
1123 /*
1124 * Moves any padding between pixels to the end
1125 * e.g. RGBXRGBX -> RGBRGBXX
1126 */
1127 if (swizzle_pad) {
1128 unsigned char swizzles[16];
1129 unsigned elems = pixels * dst_channels;
1130
1131 for (i = 0; i < type.length; ++i) {
1132 if (i < elems)
1133 swizzles[i] = i % dst_channels + (i / dst_channels) * 4;
1134 else
1135 swizzles[i] = LP_BLD_SWIZZLE_DONTCARE;
1136 }
1137
1138 for (i = 0; i < src_count; ++i) {
1139 dst[i] = lp_build_swizzle_aos_n(gallivm, dst[i], swizzles, type.length, type.length);
1140 }
1141 }
1142
1143 return src_count;
1144 }
1145
1146
1147 /*
1148 * Untwiddle and transpose, much like the above.
1149 * However, this is after conversion, so we get packed vectors.
1150 * At this time we only handle 4x16i8 rgba / 2x16i8 rg / 1x16i8 r data;
1151 * the vectors will look like:
1152 * r0r1r4r5r2r3r6r7r8r9r12... (albeit the color channels may
1153 * be swizzled here). Extending to 16 bit should be trivial.
1154 * Should also be extended to handle twice-wide vectors with AVX2...
1155 */
1156 static void
1157 fs_twiddle_transpose(struct gallivm_state *gallivm,
1158 struct lp_type type,
1159 LLVMValueRef *src,
1160 unsigned src_count,
1161 LLVMValueRef *dst)
1162 {
1163 unsigned i, j;
1164 struct lp_type type64, type16, type32;
1165 LLVMTypeRef type64_t, type8_t, type16_t, type32_t;
1166 LLVMBuilderRef builder = gallivm->builder;
1167 LLVMValueRef tmp[4], shuf[8];
1168 for (j = 0; j < 2; j++) {
1169 shuf[j*4 + 0] = lp_build_const_int32(gallivm, j*4 + 0);
1170 shuf[j*4 + 1] = lp_build_const_int32(gallivm, j*4 + 2);
1171 shuf[j*4 + 2] = lp_build_const_int32(gallivm, j*4 + 1);
1172 shuf[j*4 + 3] = lp_build_const_int32(gallivm, j*4 + 3);
1173 }
1174
1175 assert(src_count == 4 || src_count == 2 || src_count == 1);
1176 assert(type.width == 8);
1177 assert(type.length == 16);
1178
1179 type8_t = lp_build_vec_type(gallivm, type);
1180
1181 type64 = type;
1182 type64.length /= 8;
1183 type64.width *= 8;
1184 type64_t = lp_build_vec_type(gallivm, type64);
1185
1186 type16 = type;
1187 type16.length /= 2;
1188 type16.width *= 2;
1189 type16_t = lp_build_vec_type(gallivm, type16);
1190
1191 type32 = type;
1192 type32.length /= 4;
1193 type32.width *= 4;
1194 type32_t = lp_build_vec_type(gallivm, type32);
1195
1196 lp_build_transpose_aos_n(gallivm, type, src, src_count, tmp);
1197
1198 if (src_count == 1) {
1199 /* transpose was a no-op, just untwiddle */
1200 LLVMValueRef shuf_vec;
1201 shuf_vec = LLVMConstVector(shuf, 8);
1202 tmp[0] = LLVMBuildBitCast(builder, src[0], type16_t, "");
1203 tmp[0] = LLVMBuildShuffleVector(builder, tmp[0], tmp[0], shuf_vec, "");
1204 dst[0] = LLVMBuildBitCast(builder, tmp[0], type8_t, "");
1205 } else if (src_count == 2) {
1206 LLVMValueRef shuf_vec;
1207 shuf_vec = LLVMConstVector(shuf, 4);
1208
1209 for (i = 0; i < 2; i++) {
1210 tmp[i] = LLVMBuildBitCast(builder, tmp[i], type32_t, "");
1211 tmp[i] = LLVMBuildShuffleVector(builder, tmp[i], tmp[i], shuf_vec, "");
1212 dst[i] = LLVMBuildBitCast(builder, tmp[i], type8_t, "");
1213 }
1214 } else {
1215 for (j = 0; j < 2; j++) {
1216 LLVMValueRef lo, hi, lo2, hi2;
1217 /*
1218 * Note that if we only really have 3 valid channels (rgb)
1219 * and we don't need alpha we could substitute an undef here
1220 * for the respective channel (causing llvm to drop conversion
1221 * for alpha).
1222 */
1223 /* we now have rgba0rgba1rgba4rgba5 etc, untwiddle */
1224 lo2 = LLVMBuildBitCast(builder, tmp[j*2], type64_t, "");
1225 hi2 = LLVMBuildBitCast(builder, tmp[j*2 + 1], type64_t, "");
1226 lo = lp_build_interleave2(gallivm, type64, lo2, hi2, 0);
1227 hi = lp_build_interleave2(gallivm, type64, lo2, hi2, 1);
1228 dst[j*2] = LLVMBuildBitCast(builder, lo, type8_t, "");
1229 dst[j*2 + 1] = LLVMBuildBitCast(builder, hi, type8_t, "");
1230 }
1231 }
1232 }
1233
1234
1235 /**
1236 * Load an unswizzled block of pixels from memory
1237 */
1238 static void
1239 load_unswizzled_block(struct gallivm_state *gallivm,
1240 LLVMValueRef base_ptr,
1241 LLVMValueRef stride,
1242 unsigned block_width,
1243 unsigned block_height,
1244 LLVMValueRef* dst,
1245 struct lp_type dst_type,
1246 unsigned dst_count,
1247 unsigned dst_alignment)
1248 {
1249 LLVMBuilderRef builder = gallivm->builder;
1250 unsigned row_size = dst_count / block_height;
1251 unsigned i;
1252
1253 /* Ensure block exactly fits into dst */
1254 assert((block_width * block_height) % dst_count == 0);
1255
1256 for (i = 0; i < dst_count; ++i) {
1257 unsigned x = i % row_size;
1258 unsigned y = i / row_size;
1259
1260 LLVMValueRef bx = lp_build_const_int32(gallivm, x * (dst_type.width / 8) * dst_type.length);
1261 LLVMValueRef by = LLVMBuildMul(builder, lp_build_const_int32(gallivm, y), stride, "");
1262
1263 LLVMValueRef gep[2];
1264 LLVMValueRef dst_ptr;
1265
1266 gep[0] = lp_build_const_int32(gallivm, 0);
1267 gep[1] = LLVMBuildAdd(builder, bx, by, "");
1268
1269 dst_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
1270 dst_ptr = LLVMBuildBitCast(builder, dst_ptr,
1271 LLVMPointerType(lp_build_vec_type(gallivm, dst_type), 0), "");
1272
1273 dst[i] = LLVMBuildLoad(builder, dst_ptr, "");
1274
1275 LLVMSetAlignment(dst[i], dst_alignment);
1276 }
1277 }
1278
1279
1280 /**
1281 * Store an unswizzled block of pixels to memory
1282 */
1283 static void
1284 store_unswizzled_block(struct gallivm_state *gallivm,
1285 LLVMValueRef base_ptr,
1286 LLVMValueRef stride,
1287 unsigned block_width,
1288 unsigned block_height,
1289 LLVMValueRef* src,
1290 struct lp_type src_type,
1291 unsigned src_count,
1292 unsigned src_alignment)
1293 {
1294 LLVMBuilderRef builder = gallivm->builder;
1295 unsigned row_size = src_count / block_height;
1296 unsigned i;
1297
1298 /* Ensure src exactly fits into block */
1299 assert((block_width * block_height) % src_count == 0);
1300
1301 for (i = 0; i < src_count; ++i) {
1302 unsigned x = i % row_size;
1303 unsigned y = i / row_size;
1304
1305 LLVMValueRef bx = lp_build_const_int32(gallivm, x * (src_type.width / 8) * src_type.length);
1306 LLVMValueRef by = LLVMBuildMul(builder, lp_build_const_int32(gallivm, y), stride, "");
1307
1308 LLVMValueRef gep[2];
1309 LLVMValueRef src_ptr;
1310
1311 gep[0] = lp_build_const_int32(gallivm, 0);
1312 gep[1] = LLVMBuildAdd(builder, bx, by, "");
1313
1314 src_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
1315 src_ptr = LLVMBuildBitCast(builder, src_ptr,
1316 LLVMPointerType(lp_build_vec_type(gallivm, src_type), 0), "");
1317
1318 src_ptr = LLVMBuildStore(builder, src[i], src_ptr);
1319
1320 LLVMSetAlignment(src_ptr, src_alignment);
1321 }
1322 }
1323
1324
1325 /**
1326 * Checks if a format description is an arithmetic format
1327 *
1328 * A format which has irregular channel sizes such as R3_G3_B2 or R5_G6_B5.
1329 */
1330 static inline boolean
1331 is_arithmetic_format(const struct util_format_description *format_desc)
1332 {
1333 boolean arith = false;
1334 unsigned i;
1335
1336 for (i = 0; i < format_desc->nr_channels; ++i) {
1337 arith |= format_desc->channel[i].size != format_desc->channel[0].size;
1338 arith |= (format_desc->channel[i].size % 8) != 0;
1339 }
1340
1341 return arith;
1342 }
1343
1344
1345 /**
1346 * Checks if this format requires special handling due to required expansion
1347 * to floats for blending, and furthermore has "natural" packed AoS -> unpacked
1348 * SoA conversion.
1349 */
1350 static inline boolean
1351 format_expands_to_float_soa(const struct util_format_description *format_desc)
1352 {
1353 if (format_desc->format == PIPE_FORMAT_R11G11B10_FLOAT ||
1354 format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
1355 return true;
1356 }
1357 return false;
1358 }
1359
1360
1361 /**
1362 * Retrieves the type representing the memory layout for a format
1363 *
1364 * e.g. RGBA16F = 4x half-float and R3G3B2 = 1x byte
1365 */
1366 static inline void
1367 lp_mem_type_from_format_desc(const struct util_format_description *format_desc,
1368 struct lp_type* type)
1369 {
1370 unsigned i;
1371 unsigned chan;
1372
1373 if (format_expands_to_float_soa(format_desc)) {
1374 /* just make this a uint with width of block */
1375 type->floating = false;
1376 type->fixed = false;
1377 type->sign = false;
1378 type->norm = false;
1379 type->width = format_desc->block.bits;
1380 type->length = 1;
1381 return;
1382 }
1383
1384 for (i = 0; i < 4; i++)
1385 if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1386 break;
1387 chan = i;
1388
1389 memset(type, 0, sizeof(struct lp_type));
1390 type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
1391 type->fixed = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
1392 type->sign = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
1393 type->norm = format_desc->channel[chan].normalized;
1394
1395 if (is_arithmetic_format(format_desc)) {
1396 type->width = 0;
1397 type->length = 1;
1398
1399 for (i = 0; i < format_desc->nr_channels; ++i) {
1400 type->width += format_desc->channel[i].size;
1401 }
1402 } else {
1403 type->width = format_desc->channel[chan].size;
1404 type->length = format_desc->nr_channels;
1405 }
1406 }
1407
1408
1409 /**
1410 * Retrieves the type for a format which is usable in the blending code.
1411 *
1412 * e.g. RGBA16F = 4x float, R3G3B2 = 3x byte
1413 */
1414 static inline void
1415 lp_blend_type_from_format_desc(const struct util_format_description *format_desc,
1416 struct lp_type* type)
1417 {
1418 unsigned i;
1419 unsigned chan;
1420
1421 if (format_expands_to_float_soa(format_desc)) {
1422 /* always use ordinary floats for blending */
1423 type->floating = true;
1424 type->fixed = false;
1425 type->sign = true;
1426 type->norm = false;
1427 type->width = 32;
1428 type->length = 4;
1429 return;
1430 }
1431
1432 for (i = 0; i < 4; i++)
1433 if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1434 break;
1435 chan = i;
1436
1437 memset(type, 0, sizeof(struct lp_type));
1438 type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
1439 type->fixed = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
1440 type->sign = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
1441 type->norm = format_desc->channel[chan].normalized;
1442 type->width = format_desc->channel[chan].size;
1443 type->length = format_desc->nr_channels;
1444
1445 for (i = 1; i < format_desc->nr_channels; ++i) {
1446 if (format_desc->channel[i].size > type->width)
1447 type->width = format_desc->channel[i].size;
1448 }
1449
1450 if (type->floating) {
1451 type->width = 32;
1452 } else {
1453 if (type->width <= 8) {
1454 type->width = 8;
1455 } else if (type->width <= 16) {
1456 type->width = 16;
1457 } else {
1458 type->width = 32;
1459 }
1460 }
1461
1462 if (is_arithmetic_format(format_desc) && type->length == 3) {
1463 type->length = 4;
1464 }
1465 }
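
/*
 * Worked example of the two helpers above: for PIPE_FORMAT_B5G6R5_UNORM the
 * memory type is a single 16-bit uint (arithmetic format, 5+6+5 bits),
 * while the blend type is widened to 4 x unorm8 (the largest channel is
 * 6 bits, rounded up to 8, and the 3-channel arithmetic format is padded
 * to 4 channels).
 */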
1466
1467
1468 /**
1469 * Scale a normalized value from src_bits to dst_bits.
1470 *
1471 * The exact calculation is
1472 *
1473 * dst = iround(src * dst_mask / src_mask)
1474 *
1475 * or with integer rounding
1476 *
1477 * dst = src * (2*dst_mask + sign(src)*src_mask) / (2*src_mask)
1478 *
1479 * where
1480 *
1481 * src_mask = (1 << src_bits) - 1
1482 * dst_mask = (1 << dst_bits) - 1
1483 *
1484 * but we try to avoid division and multiplication through shifts.
1485 */
1486 static inline LLVMValueRef
1487 scale_bits(struct gallivm_state *gallivm,
1488 int src_bits,
1489 int dst_bits,
1490 LLVMValueRef src,
1491 struct lp_type src_type)
1492 {
1493 LLVMBuilderRef builder = gallivm->builder;
1494 LLVMValueRef result = src;
1495
1496 if (dst_bits < src_bits) {
1497 int delta_bits = src_bits - dst_bits;
1498
1499 if (delta_bits <= dst_bits) {
1500 /*
1501 * Approximate the rescaling with a single shift.
1502 *
1503 * This gives the wrong rounding.
1504 */
1505
1506 result = LLVMBuildLShr(builder,
1507 src,
1508 lp_build_const_int_vec(gallivm, src_type, delta_bits),
1509 "");
1510
1511 } else {
1512 /*
1513 * Try more accurate rescaling.
1514 */
1515
1516 /*
1517 * Drop the least significant bits to make space for the multiplication.
1518 *
1519 * XXX: A better approach would be to use a wider integer type as intermediate. But
1520 * this is enough to convert alpha from 16bits -> 2 when rendering to
1521 * PIPE_FORMAT_R10G10B10A2_UNORM.
1522 */
1523 result = LLVMBuildLShr(builder,
1524 src,
1525 lp_build_const_int_vec(gallivm, src_type, dst_bits),
1526 "");
1527
1528
1529 result = LLVMBuildMul(builder,
1530 result,
1531 lp_build_const_int_vec(gallivm, src_type, (1LL << dst_bits) - 1),
1532 "");
1533
1534 /*
1535 * Add a rounding term before the division.
1536 *
1537 * TODO: Handle signed integers too.
1538 */
1539 if (!src_type.sign) {
1540 result = LLVMBuildAdd(builder,
1541 result,
1542 lp_build_const_int_vec(gallivm, src_type, (1LL << (delta_bits - 1))),
1543 "");
1544 }
1545
1546 /*
1547 * Approximate the division by src_mask with a src_bits shift.
1548 *
1549 * Given the src has already been shifted by dst_bits, all we need
1550 * to do is to shift by the difference.
1551 */
1552
1553 result = LLVMBuildLShr(builder,
1554 result,
1555 lp_build_const_int_vec(gallivm, src_type, delta_bits),
1556 "");
1557 }
1558
1559 } else if (dst_bits > src_bits) {
1560 /* Scale up bits */
1561 int db = dst_bits - src_bits;
1562
1563 /* Shift left by difference in bits */
1564 result = LLVMBuildShl(builder,
1565 src,
1566 lp_build_const_int_vec(gallivm, src_type, db),
1567 "");
1568
1569 if (db <= src_bits) {
1570 /* Enough bits in src to fill the remainder */
1571 LLVMValueRef lower = LLVMBuildLShr(builder,
1572 src,
1573 lp_build_const_int_vec(gallivm, src_type, src_bits - db),
1574 "");
1575
1576 result = LLVMBuildOr(builder, result, lower, "");
1577 } else if (db > src_bits) {
1578 /* Need to repeatedly copy src bits to fill remainder in dst */
1579 unsigned n;
1580
1581 for (n = src_bits; n < dst_bits; n *= 2) {
1582 LLVMValueRef shuv = lp_build_const_int_vec(gallivm, src_type, n);
1583
1584 result = LLVMBuildOr(builder,
1585 result,
1586 LLVMBuildLShr(builder, result, shuv, ""),
1587 "");
1588 }
1589 }
1590 }
1591
1592 return result;
1593 }
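
/*
 * Worked example for the scale-up path above: expanding a 4-bit unorm value
 * to 8 bits shifts left and replicates the source bits into the low end,
 * e.g. 0xf -> (0xf << 4) | 0xf = 0xff and 0x5 -> 0x55, matching
 * iround(src * 255 / 15). The scale-down path with a single shift is only
 * an approximation, as noted in the code.
 */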
1594
1595 /**
1596 * If RT is a smallfloat (needing denorms) format
1597 */
1598 static inline int
1599 have_smallfloat_format(struct lp_type dst_type,
1600 enum pipe_format format)
1601 {
1602 return ((dst_type.floating && dst_type.width != 32) ||
1603 /* due to format handling hacks this format doesn't have floating set
1604 * here (and actually has width set to 32 too) so special case this. */
1605 (format == PIPE_FORMAT_R11G11B10_FLOAT));
1606 }
1607
1608
1609 /**
1610 * Convert from memory format to blending format
1611 *
1612 * e.g. GL_R3G3B2 is 1 byte in memory but 3 bytes for blending
1613 */
1614 static void
1615 convert_to_blend_type(struct gallivm_state *gallivm,
1616 unsigned block_size,
1617 const struct util_format_description *src_fmt,
1618 struct lp_type src_type,
1619 struct lp_type dst_type,
1620 LLVMValueRef* src, // and dst
1621 unsigned num_srcs)
1622 {
1623 LLVMValueRef *dst = src;
1624 LLVMBuilderRef builder = gallivm->builder;
1625 struct lp_type blend_type;
1626 struct lp_type mem_type;
1627 unsigned i, j;
1628 unsigned pixels = block_size / num_srcs;
1629 bool is_arith;
1630
1631 /*
1632 * full custom path for packed floats and srgb formats - none of the later
1633 * functions would do anything useful, and given the lp_type representation they
1634 * can't be fixed. Should really have some SoA blend path for these kind of
1635 * formats rather than hacking them in here.
1636 */
1637 if (format_expands_to_float_soa(src_fmt)) {
1638 LLVMValueRef tmpsrc[4];
1639 /*
1640       * This is pretty suboptimal for this case - blending in SoA would be much
1641       * better, since conversion gets us SoA values which we then need to convert back.
1642 */
1643 assert(src_type.width == 32 || src_type.width == 16);
1644 assert(dst_type.floating);
1645 assert(dst_type.width == 32);
1646 assert(dst_type.length % 4 == 0);
1647 assert(num_srcs % 4 == 0);
1648
1649 if (src_type.width == 16) {
1650 /* expand 4x16bit values to 4x32bit */
1651 struct lp_type type32x4 = src_type;
1652 LLVMTypeRef ltype32x4;
1653 unsigned num_fetch = dst_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
1654 type32x4.width = 32;
1655 ltype32x4 = lp_build_vec_type(gallivm, type32x4);
1656 for (i = 0; i < num_fetch; i++) {
1657 src[i] = LLVMBuildZExt(builder, src[i], ltype32x4, "");
1658 }
1659 src_type.width = 32;
1660 }
1661 for (i = 0; i < 4; i++) {
1662 tmpsrc[i] = src[i];
1663 }
1664 for (i = 0; i < num_srcs / 4; i++) {
1665 LLVMValueRef tmpsoa[4];
1666 LLVMValueRef tmps = tmpsrc[i];
1667 if (dst_type.length == 8) {
1668 LLVMValueRef shuffles[8];
1669 unsigned j;
1670 /* fetch was 4 values but need 8-wide output values */
1671 tmps = lp_build_concat(gallivm, &tmpsrc[i * 2], src_type, 2);
1672 /*
1673             * for 8-wide the aos transpose would give us the wrong order, not matching the
1674             * incoming converted fs values and mask. ARGH.
1675 */
1676 for (j = 0; j < 4; j++) {
1677 shuffles[j] = lp_build_const_int32(gallivm, j * 2);
1678 shuffles[j + 4] = lp_build_const_int32(gallivm, j * 2 + 1);
1679 }
1680 tmps = LLVMBuildShuffleVector(builder, tmps, tmps,
1681 LLVMConstVector(shuffles, 8), "");
1682 }
1683 if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
1684 lp_build_r11g11b10_to_float(gallivm, tmps, tmpsoa);
1685 }
1686 else {
1687 lp_build_unpack_rgba_soa(gallivm, src_fmt, dst_type, tmps, tmpsoa);
1688 }
1689 lp_build_transpose_aos(gallivm, dst_type, tmpsoa, &src[i * 4]);
1690 }
1691 return;
1692 }
1693
1694 lp_mem_type_from_format_desc(src_fmt, &mem_type);
1695 lp_blend_type_from_format_desc(src_fmt, &blend_type);
1696
1697 /* Is the format arithmetic */
1698 is_arith = blend_type.length * blend_type.width != mem_type.width * mem_type.length;
1699 is_arith &= !(mem_type.width == 16 && mem_type.floating);
1700
1701 /* Pad if necessary */
1702 if (!is_arith && src_type.length < dst_type.length) {
1703 for (i = 0; i < num_srcs; ++i) {
1704 dst[i] = lp_build_pad_vector(gallivm, src[i], dst_type.length);
1705 }
1706
1707 src_type.length = dst_type.length;
1708 }
1709
1710 /* Special case for half-floats */
1711 if (mem_type.width == 16 && mem_type.floating) {
1712 assert(blend_type.width == 32 && blend_type.floating);
1713 lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);
1714 is_arith = false;
1715 }
1716
1717 if (!is_arith) {
1718 return;
1719 }
1720
1721 src_type.width = blend_type.width * blend_type.length;
1722 blend_type.length *= pixels;
1723 src_type.length *= pixels / (src_type.length / mem_type.length);
1724
1725 for (i = 0; i < num_srcs; ++i) {
1726 LLVMValueRef chans[4];
1727 LLVMValueRef res = NULL;
1728
1729 dst[i] = LLVMBuildZExt(builder, src[i], lp_build_vec_type(gallivm, src_type), "");
1730
1731 for (j = 0; j < src_fmt->nr_channels; ++j) {
1732 unsigned mask = 0;
1733 unsigned sa = src_fmt->channel[j].shift;
1734 #if UTIL_ARCH_LITTLE_ENDIAN
1735 unsigned from_lsb = j;
1736 #else
1737 unsigned from_lsb = src_fmt->nr_channels - j - 1;
1738 #endif
1739
1740 mask = (1 << src_fmt->channel[j].size) - 1;
1741
1742 /* Extract bits from source */
1743 chans[j] = LLVMBuildLShr(builder,
1744 dst[i],
1745 lp_build_const_int_vec(gallivm, src_type, sa),
1746 "");
1747
1748 chans[j] = LLVMBuildAnd(builder,
1749 chans[j],
1750 lp_build_const_int_vec(gallivm, src_type, mask),
1751 "");
1752
1753 /* Scale bits */
1754 if (src_type.norm) {
1755 chans[j] = scale_bits(gallivm, src_fmt->channel[j].size,
1756 blend_type.width, chans[j], src_type);
1757 }
1758
1759 /* Insert bits into correct position */
1760 chans[j] = LLVMBuildShl(builder,
1761 chans[j],
1762 lp_build_const_int_vec(gallivm, src_type, from_lsb * blend_type.width),
1763 "");
1764
1765 if (j == 0) {
1766 res = chans[j];
1767 } else {
1768 res = LLVMBuildOr(builder, res, chans[j], "");
1769 }
1770 }
1771
1772 dst[i] = LLVMBuildBitCast(builder, res, lp_build_vec_type(gallivm, blend_type), "");
1773 }
1774 }
1775
1776
1777 /**
1778 * Convert from blending format to memory format
1779 *
1780 * e.g. GL_R3G3B2 is 3 bytes for blending but 1 byte in memory
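 * (each 8-bit blend channel gets narrowed back to its 3- or 2-bit memory
 * size via scale_bits)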
1781 */
1782 static void
1783 convert_from_blend_type(struct gallivm_state *gallivm,
1784 unsigned block_size,
1785 const struct util_format_description *src_fmt,
1786 struct lp_type src_type,
1787 struct lp_type dst_type,
1788 LLVMValueRef* src, // and dst
1789 unsigned num_srcs)
1790 {
1791 LLVMValueRef* dst = src;
1792 unsigned i, j, k;
1793 struct lp_type mem_type;
1794 struct lp_type blend_type;
1795 LLVMBuilderRef builder = gallivm->builder;
1796 unsigned pixels = block_size / num_srcs;
1797 bool is_arith;
1798
1799 /*
1800 * full custom path for packed floats and srgb formats - none of the later
1801 * functions would do anything useful, and given the lp_type representation they
1802    * can't be fixed. Should really have some SoA blend path for these kinds of
1803 * formats rather than hacking them in here.
1804 */
1805 if (format_expands_to_float_soa(src_fmt)) {
1806 /*
1807       * This is pretty suboptimal for this case - blending in SoA would be much
1808 * better - we need to transpose the AoS values back to SoA values for
1809 * conversion/packing.
1810 */
1811 assert(src_type.floating);
1812 assert(src_type.width == 32);
1813 assert(src_type.length % 4 == 0);
1814 assert(dst_type.width == 32 || dst_type.width == 16);
1815
1816 for (i = 0; i < num_srcs / 4; i++) {
1817 LLVMValueRef tmpsoa[4], tmpdst;
1818 lp_build_transpose_aos(gallivm, src_type, &src[i * 4], tmpsoa);
1819 /* really really need SoA here */
1820
1821 if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
1822 tmpdst = lp_build_float_to_r11g11b10(gallivm, tmpsoa);
1823 }
1824 else {
1825 tmpdst = lp_build_float_to_srgb_packed(gallivm, src_fmt,
1826 src_type, tmpsoa);
1827 }
1828
1829 if (src_type.length == 8) {
1830 LLVMValueRef tmpaos, shuffles[8];
1831 unsigned j;
1832 /*
1833             * for 8-wide the aos transpose has given us the wrong order, not matching the
1834             * output order. HMPF. Also need to split the output values manually.
1835 */
1836 for (j = 0; j < 4; j++) {
1837 shuffles[j * 2] = lp_build_const_int32(gallivm, j);
1838 shuffles[j * 2 + 1] = lp_build_const_int32(gallivm, j + 4);
1839 }
1840 tmpaos = LLVMBuildShuffleVector(builder, tmpdst, tmpdst,
1841 LLVMConstVector(shuffles, 8), "");
1842 src[i * 2] = lp_build_extract_range(gallivm, tmpaos, 0, 4);
1843 src[i * 2 + 1] = lp_build_extract_range(gallivm, tmpaos, 4, 4);
1844 }
1845 else {
1846 src[i] = tmpdst;
1847 }
1848 }
1849 if (dst_type.width == 16) {
1850 struct lp_type type16x8 = dst_type;
1851 struct lp_type type32x4 = dst_type;
1852 LLVMTypeRef ltype16x4, ltypei64, ltypei128;
1853 unsigned num_fetch = src_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
1854 type16x8.length = 8;
1855 type32x4.width = 32;
1856 ltypei128 = LLVMIntTypeInContext(gallivm->context, 128);
1857 ltypei64 = LLVMIntTypeInContext(gallivm->context, 64);
1858 ltype16x4 = lp_build_vec_type(gallivm, dst_type);
1859 /* We could do vector truncation but it doesn't generate very good code */
1860 for (i = 0; i < num_fetch; i++) {
1861 src[i] = lp_build_pack2(gallivm, type32x4, type16x8,
1862 src[i], lp_build_zero(gallivm, type32x4));
1863 src[i] = LLVMBuildBitCast(builder, src[i], ltypei128, "");
1864 src[i] = LLVMBuildTrunc(builder, src[i], ltypei64, "");
1865 src[i] = LLVMBuildBitCast(builder, src[i], ltype16x4, "");
1866 }
1867 }
1868 return;
1869 }
1870
1871 lp_mem_type_from_format_desc(src_fmt, &mem_type);
1872 lp_blend_type_from_format_desc(src_fmt, &blend_type);
1873
1874 is_arith = (blend_type.length * blend_type.width != mem_type.width * mem_type.length);
1875
1876 /* Special case for half-floats */
1877 if (mem_type.width == 16 && mem_type.floating) {
1878 int length = dst_type.length;
1879 assert(blend_type.width == 32 && blend_type.floating);
1880
1881 dst_type.length = src_type.length;
1882
1883 lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);
1884
1885 dst_type.length = length;
1886 is_arith = false;
1887 }
1888
1889 /* Remove any padding */
1890 if (!is_arith && (src_type.length % mem_type.length)) {
1891 src_type.length -= (src_type.length % mem_type.length);
1892
1893 for (i = 0; i < num_srcs; ++i) {
1894 dst[i] = lp_build_extract_range(gallivm, dst[i], 0, src_type.length);
1895 }
1896 }
1897
1898 /* No bit arithmetic to do */
1899 if (!is_arith) {
1900 return;
1901 }
1902
1903 src_type.length = pixels;
1904 src_type.width = blend_type.length * blend_type.width;
1905 dst_type.length = pixels;
1906
1907 for (i = 0; i < num_srcs; ++i) {
1908 LLVMValueRef chans[4];
1909 LLVMValueRef res = NULL;
1910
1911 dst[i] = LLVMBuildBitCast(builder, src[i], lp_build_vec_type(gallivm, src_type), "");
1912
1913 for (j = 0; j < src_fmt->nr_channels; ++j) {
1914 unsigned mask = 0;
1915 unsigned sa = src_fmt->channel[j].shift;
1916 unsigned sz_a = src_fmt->channel[j].size;
1917 #if UTIL_ARCH_LITTLE_ENDIAN
1918 unsigned from_lsb = j;
1919 #else
1920 unsigned from_lsb = src_fmt->nr_channels - j - 1;
1921 #endif
1922
1923 assert(blend_type.width > src_fmt->channel[j].size);
1924
1925 for (k = 0; k < blend_type.width; ++k) {
1926 mask |= 1 << k;
1927 }
1928
1929 /* Extract bits */
1930 chans[j] = LLVMBuildLShr(builder,
1931 dst[i],
1932 lp_build_const_int_vec(gallivm, src_type,
1933 from_lsb * blend_type.width),
1934 "");
1935
1936 chans[j] = LLVMBuildAnd(builder,
1937 chans[j],
1938 lp_build_const_int_vec(gallivm, src_type, mask),
1939 "");
1940
1941 /* Scale down bits */
1942 if (src_type.norm) {
1943 chans[j] = scale_bits(gallivm, blend_type.width,
1944 src_fmt->channel[j].size, chans[j], src_type);
1945 } else if (!src_type.floating && sz_a < blend_type.width) {
1946 LLVMValueRef mask_val = lp_build_const_int_vec(gallivm, src_type, (1UL << sz_a) - 1);
1947 LLVMValueRef mask = LLVMBuildICmp(builder, LLVMIntUGT, chans[j], mask_val, "");
1948 chans[j] = LLVMBuildSelect(builder, mask, mask_val, chans[j], "");
1949 }
1950
1951 /* Insert bits */
1952 chans[j] = LLVMBuildShl(builder,
1953 chans[j],
1954 lp_build_const_int_vec(gallivm, src_type, sa),
1955 "");
1956
1957 sa += src_fmt->channel[j].size;
1958
1959 if (j == 0) {
1960 res = chans[j];
1961 } else {
1962 res = LLVMBuildOr(builder, res, chans[j], "");
1963 }
1964 }
1965
1966 assert (dst_type.width != 24);
1967
1968 dst[i] = LLVMBuildTrunc(builder, res, lp_build_vec_type(gallivm, dst_type), "");
1969 }
1970 }
1971
1972
1973 /**
1974 * Convert alpha to same blend type as src
1975 */
1976 static void
1977 convert_alpha(struct gallivm_state *gallivm,
1978 struct lp_type row_type,
1979 struct lp_type alpha_type,
1980 const unsigned block_size,
1981 const unsigned block_height,
1982 const unsigned src_count,
1983 const unsigned dst_channels,
1984 const bool pad_inline,
1985 LLVMValueRef* src_alpha)
1986 {
1987 LLVMBuilderRef builder = gallivm->builder;
1988 unsigned i, j;
1989 unsigned length = row_type.length;
1990 row_type.length = alpha_type.length;
1991
1992 /* Twiddle the alpha to match pixels */
1993 lp_bld_quad_twiddle(gallivm, alpha_type, src_alpha, block_height, src_alpha);
1994
1995 /*
1996 * TODO this should use single lp_build_conv call for
1997 * src_count == 1 && dst_channels == 1 case (dropping the concat below)
1998 */
1999 for (i = 0; i < block_height; ++i) {
2000 lp_build_conv(gallivm, alpha_type, row_type, &src_alpha[i], 1, &src_alpha[i], 1);
2001 }
2002
2003 alpha_type = row_type;
2004 row_type.length = length;
2005
2006    /* If only one channel we only need the single alpha value per pixel */
2007 if (src_count == 1 && dst_channels == 1) {
2008
2009 lp_build_concat_n(gallivm, alpha_type, src_alpha, block_height, src_alpha, src_count);
2010 } else {
2011 /* If there are more srcs than rows then we need to split alpha up */
2012 if (src_count > block_height) {
2013 for (i = src_count; i > 0; --i) {
2014 unsigned pixels = block_size / src_count;
2015 unsigned idx = i - 1;
2016
2017 src_alpha[idx] = lp_build_extract_range(gallivm, src_alpha[(idx * pixels) / 4],
2018 (idx * pixels) % 4, pixels);
2019 }
2020 }
2021
2022       /* If there is a src for each pixel, broadcast the alpha across the whole row */
2023 if (src_count == block_size) {
2024 for (i = 0; i < src_count; ++i) {
2025 src_alpha[i] = lp_build_broadcast(gallivm,
2026 lp_build_vec_type(gallivm, row_type), src_alpha[i]);
2027 }
2028 } else {
2029 unsigned pixels = block_size / src_count;
2030 unsigned channels = pad_inline ? TGSI_NUM_CHANNELS : dst_channels;
2031 unsigned alpha_span = 1;
2032 LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
2033
2034 /* Check if we need 2 src_alphas for our shuffles */
2035 if (pixels > alpha_type.length) {
2036 alpha_span = 2;
2037 }
2038
2039 /* Broadcast alpha across all channels, e.g. a1a2 to a1a1a1a1a2a2a2a2 */
2040 for (j = 0; j < row_type.length; ++j) {
2041 if (j < pixels * channels) {
2042 shuffles[j] = lp_build_const_int32(gallivm, j / channels);
2043 } else {
2044 shuffles[j] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
2045 }
2046 }
2047
2048 for (i = 0; i < src_count; ++i) {
2049 unsigned idx1 = i, idx2 = i;
2050
2051 if (alpha_span > 1){
2052 idx1 *= alpha_span;
2053 idx2 = idx1 + 1;
2054 }
2055
2056 src_alpha[i] = LLVMBuildShuffleVector(builder,
2057 src_alpha[idx1],
2058 src_alpha[idx2],
2059 LLVMConstVector(shuffles, row_type.length),
2060 "");
2061 }
2062 }
2063 }
2064 }
2065
2066
2067 /**
2068  * Generates the blend function for unswizzled colour buffers.
2069  * Also generates the read & write from the colour buffer.
2070 */
2071 static void
2072 generate_unswizzled_blend(struct gallivm_state *gallivm,
2073 unsigned rt,
2074 struct lp_fragment_shader_variant *variant,
2075 enum pipe_format out_format,
2076 unsigned int num_fs,
2077 struct lp_type fs_type,
2078 LLVMValueRef* fs_mask,
2079 LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][4],
2080 LLVMValueRef context_ptr,
2081 LLVMValueRef color_ptr,
2082 LLVMValueRef stride,
2083 unsigned partial_mask,
2084 boolean do_branch)
2085 {
2086 const unsigned alpha_channel = 3;
2087 const unsigned block_width = LP_RASTER_BLOCK_SIZE;
2088 const unsigned block_height = LP_RASTER_BLOCK_SIZE;
2089 const unsigned block_size = block_width * block_height;
2090 const unsigned lp_integer_vector_width = 128;
2091
2092 LLVMBuilderRef builder = gallivm->builder;
2093 LLVMValueRef fs_src[4][TGSI_NUM_CHANNELS];
2094 LLVMValueRef fs_src1[4][TGSI_NUM_CHANNELS];
2095 LLVMValueRef src_alpha[4 * 4];
2096 LLVMValueRef src1_alpha[4 * 4] = { NULL };
2097 LLVMValueRef src_mask[4 * 4];
2098 LLVMValueRef src[4 * 4];
2099 LLVMValueRef src1[4 * 4];
2100 LLVMValueRef dst[4 * 4];
2101 LLVMValueRef blend_color;
2102 LLVMValueRef blend_alpha;
2103 LLVMValueRef i32_zero;
2104 LLVMValueRef check_mask;
2105 LLVMValueRef undef_src_val;
2106
2107 struct lp_build_mask_context mask_ctx;
2108 struct lp_type mask_type;
2109 struct lp_type blend_type;
2110 struct lp_type row_type;
2111 struct lp_type dst_type;
2112 struct lp_type ls_type;
2113
2114 unsigned char swizzle[TGSI_NUM_CHANNELS];
2115 unsigned vector_width;
2116 unsigned src_channels = TGSI_NUM_CHANNELS;
2117 unsigned dst_channels;
2118 unsigned dst_count;
2119 unsigned src_count;
2120 unsigned i, j;
2121
2122 const struct util_format_description* out_format_desc = util_format_description(out_format);
2123
2124 unsigned dst_alignment;
2125
2126 bool pad_inline = is_arithmetic_format(out_format_desc);
2127 bool has_alpha = false;
2128 const boolean dual_source_blend = variant->key.blend.rt[0].blend_enable &&
2129 util_blend_state_is_dual(&variant->key.blend, 0);
2130
2131 const boolean is_1d = variant->key.resource_1d;
2132 boolean twiddle_after_convert = FALSE;
2133 unsigned num_fullblock_fs = is_1d ? 2 * num_fs : num_fs;
2134 LLVMValueRef fpstate = 0;
2135
2136 /* Get type from output format */
2137 lp_blend_type_from_format_desc(out_format_desc, &row_type);
2138 lp_mem_type_from_format_desc(out_format_desc, &dst_type);
2139
2140 /*
2141 * Technically this code should go into lp_build_smallfloat_to_float
2142 * and lp_build_float_to_smallfloat but due to the
2143 * http://llvm.org/bugs/show_bug.cgi?id=6393
2144 * llvm reorders the mxcsr intrinsics in a way that breaks the code.
2145 * So the ordering is important here and there shouldn't be any
2146    * llvm ir instructions in this function before
2147 * this, otherwise half-float format conversions won't work
2148 * (again due to llvm bug #6393).
2149 */
2150 if (have_smallfloat_format(dst_type, out_format)) {
2151 /* We need to make sure that denorms are ok for half float
2152 conversions */
2153 fpstate = lp_build_fpstate_get(gallivm);
2154 lp_build_fpstate_set_denorms_zero(gallivm, FALSE);
2155 }
2156
2157 mask_type = lp_int32_vec4_type();
2158 mask_type.length = fs_type.length;
2159
2160 for (i = num_fs; i < num_fullblock_fs; i++) {
2161 fs_mask[i] = lp_build_zero(gallivm, mask_type);
2162 }
2163
2164    /* Do not bother executing code when mask is empty. */
2165 if (do_branch) {
2166 check_mask = LLVMConstNull(lp_build_int_vec_type(gallivm, mask_type));
2167
2168 for (i = 0; i < num_fullblock_fs; ++i) {
2169 check_mask = LLVMBuildOr(builder, check_mask, fs_mask[i], "");
2170 }
2171
2172 lp_build_mask_begin(&mask_ctx, gallivm, mask_type, check_mask);
2173 lp_build_mask_check(&mask_ctx);
2174 }
2175
2176 partial_mask |= !variant->opaque;
2177 i32_zero = lp_build_const_int32(gallivm, 0);
2178
2179 undef_src_val = lp_build_undef(gallivm, fs_type);
2180
2181 row_type.length = fs_type.length;
2182 vector_width = dst_type.floating ? lp_native_vector_width : lp_integer_vector_width;
2183
2184 /* Compute correct swizzle and count channels */
2185 memset(swizzle, LP_BLD_SWIZZLE_DONTCARE, TGSI_NUM_CHANNELS);
2186 dst_channels = 0;
2187
2188 for (i = 0; i < TGSI_NUM_CHANNELS; ++i) {
2189 /* Ensure channel is used */
2190 if (out_format_desc->swizzle[i] >= TGSI_NUM_CHANNELS) {
2191 continue;
2192 }
2193
2194 /* Ensure not already written to (happens in case with GL_ALPHA) */
2195 if (swizzle[out_format_desc->swizzle[i]] < TGSI_NUM_CHANNELS) {
2196 continue;
2197 }
2198
2199       /* Ensure we haven't already found all channels */
2200 if (dst_channels >= out_format_desc->nr_channels) {
2201 continue;
2202 }
2203
2204 swizzle[out_format_desc->swizzle[i]] = i;
2205 ++dst_channels;
2206
2207 if (i == alpha_channel) {
2208 has_alpha = true;
2209 }
2210 }
2211
2212 if (format_expands_to_float_soa(out_format_desc)) {
2213 /*
2214        * the code above can't work for layout_other;
2215        * for srgb it would sort of work, but we short-circuit swizzles, etc.
2216 * as that is done as part of unpack / pack.
2217 */
2218 dst_channels = 4; /* HACK: this is fake 4 really but need it due to transpose stuff later */
2219 has_alpha = true;
2220 swizzle[0] = 0;
2221 swizzle[1] = 1;
2222 swizzle[2] = 2;
2223 swizzle[3] = 3;
2224 pad_inline = true; /* HACK: prevent rgbxrgbx->rgbrgbxx conversion later */
2225 }
2226
2227 /* If 3 channels then pad to include alpha for 4 element transpose */
2228 if (dst_channels == 3) {
2229 assert (!has_alpha);
2230 for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
2231 if (swizzle[i] > TGSI_NUM_CHANNELS)
2232 swizzle[i] = 3;
2233 }
2234 if (out_format_desc->nr_channels == 4) {
2235 dst_channels = 4;
2236 /*
2237 * We use alpha from the color conversion, not separate one.
2238 * We had to include it for transpose, hence it will get converted
2239 * too (albeit when doing transpose after conversion, that would
2240 * no longer be the case necessarily).
2241 * (It works only with 4 channel dsts, e.g. rgbx formats, because
2242 * otherwise we really have padding, not alpha, included.)
2243 */
2244 has_alpha = true;
2245 }
2246 }
2247
2248 /*
2249 * Load shader output
2250 */
2251 for (i = 0; i < num_fullblock_fs; ++i) {
2252 /* Always load alpha for use in blending */
2253 LLVMValueRef alpha;
2254 if (i < num_fs) {
2255 alpha = LLVMBuildLoad(builder, fs_out_color[rt][alpha_channel][i], "");
2256 }
2257 else {
2258 alpha = undef_src_val;
2259 }
2260
2261 /* Load each channel */
2262 for (j = 0; j < dst_channels; ++j) {
2263 assert(swizzle[j] < 4);
2264 if (i < num_fs) {
2265 fs_src[i][j] = LLVMBuildLoad(builder, fs_out_color[rt][swizzle[j]][i], "");
2266 }
2267 else {
2268 fs_src[i][j] = undef_src_val;
2269 }
2270 }
2271
2272 /* If 3 channels then pad to include alpha for 4 element transpose */
2273 /*
2274       * XXX If we include that here maybe we could actually use it instead of
2275       * the separate alpha for blending?
2276       * (Difficult though, since we actually convert pad channels, not alpha.)
2277 */
2278 if (dst_channels == 3 && !has_alpha) {
2279 fs_src[i][3] = alpha;
2280 }
2281
2282 /* We split the row_mask and row_alpha as we want 128bit interleave */
2283 if (fs_type.length == 8) {
2284 src_mask[i*2 + 0] = lp_build_extract_range(gallivm, fs_mask[i],
2285 0, src_channels);
2286 src_mask[i*2 + 1] = lp_build_extract_range(gallivm, fs_mask[i],
2287 src_channels, src_channels);
2288
2289 src_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
2290 src_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha,
2291 src_channels, src_channels);
2292 } else {
2293 src_mask[i] = fs_mask[i];
2294 src_alpha[i] = alpha;
2295 }
2296 }
2297 if (dual_source_blend) {
2298 /* same as above except different src/dst, skip masks and comments... */
2299 for (i = 0; i < num_fullblock_fs; ++i) {
2300 LLVMValueRef alpha;
2301 if (i < num_fs) {
2302 alpha = LLVMBuildLoad(builder, fs_out_color[1][alpha_channel][i], "");
2303 }
2304 else {
2305 alpha = undef_src_val;
2306 }
2307
2308 for (j = 0; j < dst_channels; ++j) {
2309 assert(swizzle[j] < 4);
2310 if (i < num_fs) {
2311 fs_src1[i][j] = LLVMBuildLoad(builder, fs_out_color[1][swizzle[j]][i], "");
2312 }
2313 else {
2314 fs_src1[i][j] = undef_src_val;
2315 }
2316 }
2317 if (dst_channels == 3 && !has_alpha) {
2318 fs_src1[i][3] = alpha;
2319 }
2320 if (fs_type.length == 8) {
2321 src1_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
2322 src1_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha,
2323 src_channels, src_channels);
2324 } else {
2325 src1_alpha[i] = alpha;
2326 }
2327 }
2328 }
2329
2330 if (util_format_is_pure_integer(out_format)) {
2331 /*
2332 * In this case fs_type was really ints or uints disguised as floats,
2333 * fix that up now.
2334 */
2335 fs_type.floating = 0;
2336 fs_type.sign = dst_type.sign;
2337 for (i = 0; i < num_fullblock_fs; ++i) {
2338 for (j = 0; j < dst_channels; ++j) {
2339 fs_src[i][j] = LLVMBuildBitCast(builder, fs_src[i][j],
2340 lp_build_vec_type(gallivm, fs_type), "");
2341 }
2342 if (dst_channels == 3 && !has_alpha) {
2343 fs_src[i][3] = LLVMBuildBitCast(builder, fs_src[i][3],
2344 lp_build_vec_type(gallivm, fs_type), "");
2345 }
2346 }
2347 }
2348
2349 /*
2350 * We actually should generally do conversion first (for non-1d cases)
2351 * when the blend format is 8 or 16 bits. The reason is obvious,
2352    * there are 2 or 4 times fewer vectors to deal with for the interleave...
2353 * Albeit for the AVX (not AVX2) case there's no benefit with 16 bit
2354 * vectors (as it can do 32bit unpack with 256bit vectors, but 8/16bit
2355 * unpack only with 128bit vectors).
2356 * Note: for 16bit sizes really need matching pack conversion code
2357 */
2358 if (!is_1d && dst_channels != 3 && dst_type.width == 8) {
2359 twiddle_after_convert = TRUE;
2360 }
2361
2362 /*
2363 * Pixel twiddle from fragment shader order to memory order
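    * (the fs works on 2x2 quads whereas the color buffer rows are linear,
    * so e.g. with 4-wide vectors the four quad vectors of a 4x4 block get
    * reordered into four row vectors of horizontally adjacent pixels)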
2364 */
2365 if (!twiddle_after_convert) {
2366 src_count = generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs,
2367 dst_channels, fs_src, src, pad_inline);
2368 if (dual_source_blend) {
2369 generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs, dst_channels,
2370 fs_src1, src1, pad_inline);
2371 }
2372 } else {
2373 src_count = num_fullblock_fs * dst_channels;
2374 /*
2375 * We reorder things a bit here, so the cases for 4-wide and 8-wide
2376 * (AVX) turn out the same later when untwiddling/transpose (albeit
2377 * for true AVX2 path untwiddle needs to be different).
2378 * For now just order by colors first (so we can use unpack later).
2379 */
2380 for (j = 0; j < num_fullblock_fs; j++) {
2381 for (i = 0; i < dst_channels; i++) {
2382 src[i*num_fullblock_fs + j] = fs_src[j][i];
2383 if (dual_source_blend) {
2384 src1[i*num_fullblock_fs + j] = fs_src1[j][i];
2385 }
2386 }
2387 }
2388 }
2389
2390 src_channels = dst_channels < 3 ? dst_channels : 4;
2391 if (src_count != num_fullblock_fs * src_channels) {
2392 unsigned ds = src_count / (num_fullblock_fs * src_channels);
2393 row_type.length /= ds;
2394 fs_type.length = row_type.length;
2395 }
2396
2397 blend_type = row_type;
2398 mask_type.length = 4;
2399
2400 /* Convert src to row_type */
2401 if (dual_source_blend) {
2402 struct lp_type old_row_type = row_type;
2403 lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
2404 src_count = lp_build_conv_auto(gallivm, fs_type, &old_row_type, src1, src_count, src1);
2405 }
2406 else {
2407 src_count = lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
2408 }
2409
2410 /* If the rows are not an SSE vector, combine them to become SSE size! */
2411 if ((row_type.width * row_type.length) % 128) {
2412 unsigned bits = row_type.width * row_type.length;
2413 unsigned combined;
2414
2415 assert(src_count >= (vector_width / bits));
2416
2417 dst_count = src_count / (vector_width / bits);
2418
2419 combined = lp_build_concat_n(gallivm, row_type, src, src_count, src, dst_count);
2420 if (dual_source_blend) {
2421 lp_build_concat_n(gallivm, row_type, src1, src_count, src1, dst_count);
2422 }
2423
2424 row_type.length *= combined;
2425 src_count /= combined;
2426
2427 bits = row_type.width * row_type.length;
2428 assert(bits == 128 || bits == 256);
2429 }
2430
2431 if (twiddle_after_convert) {
2432 fs_twiddle_transpose(gallivm, row_type, src, src_count, src);
2433 if (dual_source_blend) {
2434 fs_twiddle_transpose(gallivm, row_type, src1, src_count, src1);
2435 }
2436 }
2437
2438 /*
2439 * Blend Colour conversion
2440 */
2441 blend_color = lp_jit_context_f_blend_color(gallivm, context_ptr);
2442 blend_color = LLVMBuildPointerCast(builder, blend_color,
2443 LLVMPointerType(lp_build_vec_type(gallivm, fs_type), 0), "");
2444 blend_color = LLVMBuildLoad(builder, LLVMBuildGEP(builder, blend_color,
2445 &i32_zero, 1, ""), "");
2446
2447 /* Convert */
2448 lp_build_conv(gallivm, fs_type, blend_type, &blend_color, 1, &blend_color, 1);
2449
2450 if (out_format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
2451 /*
2452 * since blending is done with floats, there was no conversion.
2453 * However, the rules according to fixed point renderbuffers still
2454 * apply, that is we must clamp inputs to 0.0/1.0.
2455 * (This would apply to separate alpha conversion too but we currently
2456 * force has_alpha to be true.)
2457 * TODO: should skip this with "fake" blend, since post-blend conversion
2458 * will clamp anyway.
2459 * TODO: could also skip this if fragment color clamping is enabled. We
2460 * don't support it natively so it gets baked into the shader however, so
2461 * can't really tell here.
2462 */
2463 struct lp_build_context f32_bld;
2464 assert(row_type.floating);
2465 lp_build_context_init(&f32_bld, gallivm, row_type);
2466 for (i = 0; i < src_count; i++) {
2467 src[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src[i]);
2468 }
2469 if (dual_source_blend) {
2470 for (i = 0; i < src_count; i++) {
2471 src1[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src1[i]);
2472 }
2473 }
2474      /* probably can't be different from row_type but better safe than sorry... */
2475 lp_build_context_init(&f32_bld, gallivm, blend_type);
2476 blend_color = lp_build_clamp(&f32_bld, blend_color, f32_bld.zero, f32_bld.one);
2477 }
2478
2479 /* Extract alpha */
2480 blend_alpha = lp_build_extract_broadcast(gallivm, blend_type, row_type, blend_color, lp_build_const_int32(gallivm, 3));
2481
2482 /* Swizzle to appropriate channels, e.g. from RGBA to BGRA BGRA */
2483 pad_inline &= (dst_channels * (block_size / src_count) * row_type.width) != vector_width;
2484 if (pad_inline) {
2485 /* Use all 4 channels e.g. from RGBA RGBA to RGxx RGxx */
2486 blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, TGSI_NUM_CHANNELS, row_type.length);
2487 } else {
2488 /* Only use dst_channels e.g. RGBA RGBA to RG RG xxxx */
2489 blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, dst_channels, row_type.length);
2490 }
2491
2492 /*
2493 * Mask conversion
2494 */
2495 lp_bld_quad_twiddle(gallivm, mask_type, &src_mask[0], block_height, &src_mask[0]);
2496
2497 if (src_count < block_height) {
2498 lp_build_concat_n(gallivm, mask_type, src_mask, 4, src_mask, src_count);
2499 } else if (src_count > block_height) {
2500 for (i = src_count; i > 0; --i) {
2501 unsigned pixels = block_size / src_count;
2502 unsigned idx = i - 1;
2503
2504 src_mask[idx] = lp_build_extract_range(gallivm, src_mask[(idx * pixels) / 4],
2505 (idx * pixels) % 4, pixels);
2506 }
2507 }
2508
2509 assert(mask_type.width == 32);
2510
2511 for (i = 0; i < src_count; ++i) {
2512 unsigned pixels = block_size / src_count;
2513 unsigned pixel_width = row_type.width * dst_channels;
2514
2515 if (pixel_width == 24) {
2516 mask_type.width = 8;
2517 mask_type.length = vector_width / mask_type.width;
2518 } else {
2519 mask_type.length = pixels;
2520 mask_type.width = row_type.width * dst_channels;
2521
2522 /*
2523 * If mask_type width is smaller than 32bit, this doesn't quite
2524 * generate the most efficient code (could use some pack).
2525 */
2526 src_mask[i] = LLVMBuildIntCast(builder, src_mask[i],
2527 lp_build_int_vec_type(gallivm, mask_type), "");
2528
2529 mask_type.length *= dst_channels;
2530 mask_type.width /= dst_channels;
2531 }
2532
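      /*
       * E.g. for an rgba8 target (row_type.width 8, 4 channels) each 32-bit
       * per-pixel mask simply gets reinterpreted as four 8-bit per-channel
       * masks by the bitcast below.
       */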
2533 src_mask[i] = LLVMBuildBitCast(builder, src_mask[i],
2534 lp_build_int_vec_type(gallivm, mask_type), "");
2535 src_mask[i] = lp_build_pad_vector(gallivm, src_mask[i], row_type.length);
2536 }
2537
2538 /*
2539 * Alpha conversion
2540 */
2541 if (!has_alpha) {
2542 struct lp_type alpha_type = fs_type;
2543 alpha_type.length = 4;
2544 convert_alpha(gallivm, row_type, alpha_type,
2545 block_size, block_height,
2546 src_count, dst_channels,
2547 pad_inline, src_alpha);
2548 if (dual_source_blend) {
2549 convert_alpha(gallivm, row_type, alpha_type,
2550 block_size, block_height,
2551 src_count, dst_channels,
2552 pad_inline, src1_alpha);
2553 }
2554 }
2555
2556
2557 /*
2558 * Load dst from memory
2559 */
2560 if (src_count < block_height) {
2561 dst_count = block_height;
2562 } else {
2563 dst_count = src_count;
2564 }
2565
2566 dst_type.length *= block_size / dst_count;
2567
2568 if (format_expands_to_float_soa(out_format_desc)) {
2569 /*
2570    * we need multiple values at once for the conversion, so we might as well
2571 * load them vectorized here too instead of concatenating later.
2572 * (Still need concatenation later for 8-wide vectors).
2573 */
2574 dst_count = block_height;
2575 dst_type.length = block_width;
2576 }
2577
2578 /*
2579    * Compute the alignment of the destination pointer in bytes.
2580    * We fetch 1-4 pixels; if the format has pot alignment then those fetches
2581 * are always aligned by MIN2(16, fetch_width) except for buffers (not
2582 * 1d tex but can't distinguish here) so need to stick with per-pixel
2583 * alignment in this case.
2584 */
2585 if (is_1d) {
2586 dst_alignment = (out_format_desc->block.bits + 7)/(out_format_desc->block.width * 8);
2587 }
2588 else {
2589 dst_alignment = dst_type.length * dst_type.width / 8;
2590 }
2591 /* Force power-of-two alignment by extracting only the least-significant-bit */
2592 dst_alignment = 1 << (ffs(dst_alignment) - 1);
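   /* (e.g. an alignment of 12 bytes becomes 4, 3 becomes 1) */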
2593 /*
2594 * Resource base and stride pointers are aligned to 16 bytes, so that's
2595 * the maximum alignment we can guarantee
2596 */
2597 dst_alignment = MIN2(16, dst_alignment);
2598
2599 ls_type = dst_type;
2600
2601 if (dst_count > src_count) {
2602 if ((dst_type.width == 8 || dst_type.width == 16) &&
2603 util_is_power_of_two_or_zero(dst_type.length) &&
2604 dst_type.length * dst_type.width < 128) {
2605 /*
2606 * Never try to load values as 4xi8 which we will then
2607 * concatenate to larger vectors. This gives llvm a real
2608 * headache (the problem is the type legalizer (?) will
2609 * try to load that as 4xi8 zext to 4xi32 to fill the vector,
2610 * then the shuffles to concatenate are more or less impossible
2611 * - llvm is easily capable of generating a sequence of 32
2612 * pextrb/pinsrb instructions for that. Albeit it appears to
2613 * be fixed in llvm 4.0. So, load and concatenate with 32bit
2614 * width to avoid the trouble (16bit seems not as bad, llvm
2615 * probably recognizes the load+shuffle as only one shuffle
2616 * is necessary, but we can do just the same anyway).
2617 */
2618 ls_type.length = dst_type.length * dst_type.width / 32;
2619 ls_type.width = 32;
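         /*
          * E.g. for an R8 target the four 8-bit pixels of a row get loaded
          * as a single i32 (ls_type of length 1) and bitcast back to bytes
          * after the concatenation further below.
          */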
2620 }
2621 }
2622
2623 if (is_1d) {
2624 load_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
2625 dst, ls_type, dst_count / 4, dst_alignment);
2626 for (i = dst_count / 4; i < dst_count; i++) {
2627 dst[i] = lp_build_undef(gallivm, ls_type);
2628 }
2629
2630 }
2631 else {
2632 load_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
2633 dst, ls_type, dst_count, dst_alignment);
2634 }
2635
2636
2637 /*
2638 * Convert from dst/output format to src/blending format.
2639 *
2640 * This is necessary as we can only read 1 row from memory at a time,
2641    * so the minimum dst_count we can ever have at this point is 4.
2642    *
2643    * With, for example, the R8 format you can have all 16 pixels in a 128 bit vector;
2644 * this will take the 4 dsts and combine them into 1 src so we can perform blending
2645 * on all 16 pixels in that single vector at once.
2646 */
2647 if (dst_count > src_count) {
2648 if (ls_type.length != dst_type.length && ls_type.length == 1) {
2649 LLVMTypeRef elem_type = lp_build_elem_type(gallivm, ls_type);
2650 LLVMTypeRef ls_vec_type = LLVMVectorType(elem_type, 1);
2651 for (i = 0; i < dst_count; i++) {
2652 dst[i] = LLVMBuildBitCast(builder, dst[i], ls_vec_type, "");
2653 }
2654 }
2655
2656 lp_build_concat_n(gallivm, ls_type, dst, 4, dst, src_count);
2657
2658 if (ls_type.length != dst_type.length) {
2659 struct lp_type tmp_type = dst_type;
2660 tmp_type.length = dst_type.length * 4 / src_count;
2661 for (i = 0; i < src_count; i++) {
2662 dst[i] = LLVMBuildBitCast(builder, dst[i],
2663 lp_build_vec_type(gallivm, tmp_type), "");
2664 }
2665 }
2666 }
2667
2668 /*
2669 * Blending
2670 */
2671 /* XXX this is broken for RGB8 formats -
2672 * they get expanded from 12 to 16 elements (to include alpha)
2673 * by convert_to_blend_type then reduced to 15 instead of 12
2674 * by convert_from_blend_type (a simple fix though breaks A8...).
2675    * R16G16B16 also crashes, though differently - seemingly something going
2676    * wrong inside llvm's handling of npot vector sizes.
2677 * It seems some cleanup could be done here (like skipping conversion/blend
2678 * when not needed).
2679 */
2680 convert_to_blend_type(gallivm, block_size, out_format_desc, dst_type,
2681 row_type, dst, src_count);
2682
2683 /*
2684 * FIXME: Really should get logic ops / masks out of generic blend / row
2685 * format. Logic ops will definitely not work on the blend float format
2686    * used for SRGB here, and I think OpenGL expects this to work correctly
2687 * (that is incoming values converted to srgb then logic op applied).
2688 */
2689 for (i = 0; i < src_count; ++i) {
2690 dst[i] = lp_build_blend_aos(gallivm,
2691 &variant->key.blend,
2692 out_format,
2693 row_type,
2694 rt,
2695 src[i],
2696 has_alpha ? NULL : src_alpha[i],
2697 src1[i],
2698 has_alpha ? NULL : src1_alpha[i],
2699 dst[i],
2700 partial_mask ? src_mask[i] : NULL,
2701 blend_color,
2702 has_alpha ? NULL : blend_alpha,
2703 swizzle,
2704 pad_inline ? 4 : dst_channels);
2705 }
2706
2707 convert_from_blend_type(gallivm, block_size, out_format_desc,
2708 row_type, dst_type, dst, src_count);
2709
2710 /* Split the blend rows back to memory rows */
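   /* (e.g. for the R8 case above: the single 16-wide blend vector is split
    * back into four 4-wide memory rows) */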
2711 if (dst_count > src_count) {
2712 row_type.length = dst_type.length * (dst_count / src_count);
2713
2714 if (src_count == 1) {
2715 dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
2716 dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);
2717
2718 row_type.length /= 2;
2719 src_count *= 2;
2720 }
2721
2722 dst[3] = lp_build_extract_range(gallivm, dst[1], row_type.length / 2, row_type.length / 2);
2723 dst[2] = lp_build_extract_range(gallivm, dst[1], 0, row_type.length / 2);
2724 dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
2725 dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);
2726
2727 row_type.length /= 2;
2728 src_count *= 2;
2729 }
2730
2731 /*
2732 * Store blend result to memory
2733 */
2734 if (is_1d) {
2735 store_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
2736 dst, dst_type, dst_count / 4, dst_alignment);
2737 }
2738 else {
2739 store_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
2740 dst, dst_type, dst_count, dst_alignment);
2741 }
2742
2743 if (have_smallfloat_format(dst_type, out_format)) {
2744 lp_build_fpstate_set(gallivm, fpstate);
2745 }
2746
2747 if (do_branch) {
2748 lp_build_mask_end(&mask_ctx);
2749 }
2750 }
2751
2752
2753 /**
2754 * Generate the runtime callable function for the whole fragment pipeline.
2755 * Note that the function which we generate operates on a block of 16
2756  * pixels at a time. The block contains 2x2 quads. Each quad contains
2757 * 2x2 pixels.
2758 */
2759 static void
2760 generate_fragment(struct llvmpipe_context *lp,
2761 struct lp_fragment_shader *shader,
2762 struct lp_fragment_shader_variant *variant,
2763 unsigned partial_mask)
2764 {
2765 struct gallivm_state *gallivm = variant->gallivm;
2766 struct lp_fragment_shader_variant_key *key = &variant->key;
2767 struct lp_shader_input inputs[PIPE_MAX_SHADER_INPUTS];
2768 char func_name[64];
2769 struct lp_type fs_type;
2770 struct lp_type blend_type;
2771 LLVMTypeRef fs_elem_type;
2772 LLVMTypeRef blend_vec_type;
2773 LLVMTypeRef arg_types[15];
2774 LLVMTypeRef func_type;
2775 LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
2776 LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
2777 LLVMValueRef context_ptr;
2778 LLVMValueRef x;
2779 LLVMValueRef y;
2780 LLVMValueRef a0_ptr;
2781 LLVMValueRef dadx_ptr;
2782 LLVMValueRef dady_ptr;
2783 LLVMValueRef color_ptr_ptr;
2784 LLVMValueRef stride_ptr;
2785 LLVMValueRef color_sample_stride_ptr;
2786 LLVMValueRef depth_ptr;
2787 LLVMValueRef depth_stride;
2788 LLVMValueRef depth_sample_stride;
2789 LLVMValueRef mask_input;
2790 LLVMValueRef thread_data_ptr;
2791 LLVMBasicBlockRef block;
2792 LLVMBuilderRef builder;
2793 struct lp_build_sampler_soa *sampler;
2794 struct lp_build_image_soa *image;
2795 struct lp_build_interp_soa_context interp;
2796 LLVMValueRef fs_mask[(16 / 4) * LP_MAX_SAMPLES];
2797 LLVMValueRef fs_out_color[LP_MAX_SAMPLES][PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][16 / 4];
2798 LLVMValueRef function;
2799 LLVMValueRef facing;
2800 unsigned num_fs;
2801 unsigned i;
2802 unsigned chan;
2803 unsigned cbuf;
2804 boolean cbuf0_write_all;
2805 const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
2806 util_blend_state_is_dual(&key->blend, 0);
2807
2808 assert(lp_native_vector_width / 32 >= 4);
2809
2810 /* Adjust color input interpolation according to flatshade state:
2811 */
2812 memcpy(inputs, shader->inputs, shader->info.base.num_inputs * sizeof inputs[0]);
2813 for (i = 0; i < shader->info.base.num_inputs; i++) {
2814 if (inputs[i].interp == LP_INTERP_COLOR) {
2815 if (key->flatshade)
2816 inputs[i].interp = LP_INTERP_CONSTANT;
2817 else
2818 inputs[i].interp = LP_INTERP_PERSPECTIVE;
2819 }
2820 }
2821
2822 /* check if writes to cbuf[0] are to be copied to all cbufs */
2823 cbuf0_write_all =
2824 shader->info.base.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
2825
2826 /* TODO: actually pick these based on the fs and color buffer
2827 * characteristics. */
2828
2829 memset(&fs_type, 0, sizeof fs_type);
2830 fs_type.floating = TRUE; /* floating point values */
2831 fs_type.sign = TRUE; /* values are signed */
2832 fs_type.norm = FALSE; /* values are not limited to [0,1] or [-1,1] */
2833 fs_type.width = 32; /* 32-bit float */
2834 fs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
2835
2836 memset(&blend_type, 0, sizeof blend_type);
2837 blend_type.floating = FALSE; /* values are integers */
2838 blend_type.sign = FALSE; /* values are unsigned */
2839 blend_type.norm = TRUE; /* values are in [0,1] or [-1,1] */
2840 blend_type.width = 8; /* 8-bit ubyte values */
2841 blend_type.length = 16; /* 16 elements per vector */
2842
2843 /*
2844 * Generate the function prototype. Any change here must be reflected in
2845 * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
2846 */
2847
2848 fs_elem_type = lp_build_elem_type(gallivm, fs_type);
2849
2850 blend_vec_type = lp_build_vec_type(gallivm, blend_type);
2851
2852 snprintf(func_name, sizeof(func_name), "fs_variant_%s",
2853 partial_mask ? "partial" : "whole");
2854
2855 arg_types[0] = variant->jit_context_ptr_type; /* context */
2856 arg_types[1] = int32_type; /* x */
2857 arg_types[2] = int32_type; /* y */
2858 arg_types[3] = int32_type; /* facing */
2859 arg_types[4] = LLVMPointerType(fs_elem_type, 0); /* a0 */
2860 arg_types[5] = LLVMPointerType(fs_elem_type, 0); /* dadx */
2861 arg_types[6] = LLVMPointerType(fs_elem_type, 0); /* dady */
2862 arg_types[7] = LLVMPointerType(LLVMPointerType(int8_type, 0), 0); /* color */
2863 arg_types[8] = LLVMPointerType(int8_type, 0); /* depth */
2864 arg_types[9] = LLVMInt64TypeInContext(gallivm->context); /* mask_input */
2865 arg_types[10] = variant->jit_thread_data_ptr_type; /* per thread data */
2866 arg_types[11] = LLVMPointerType(int32_type, 0); /* stride */
2867 arg_types[12] = int32_type; /* depth_stride */
2868 arg_types[13] = LLVMPointerType(int32_type, 0); /* color sample strides */
2869 arg_types[14] = int32_type; /* depth sample stride */
2870
2871 func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
2872 arg_types, ARRAY_SIZE(arg_types), 0);
2873
2874 function = LLVMAddFunction(gallivm->module, func_name, func_type);
2875 LLVMSetFunctionCallConv(function, LLVMCCallConv);
2876
2877 variant->function[partial_mask] = function;
2878
2879 /* XXX: need to propagate noalias down into color param now we are
2880 * passing a pointer-to-pointer?
2881 */
2882 for(i = 0; i < ARRAY_SIZE(arg_types); ++i)
2883 if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
2884 lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
2885
2886 if (variant->gallivm->cache->data_size)
2887 return;
2888
2889 context_ptr = LLVMGetParam(function, 0);
2890 x = LLVMGetParam(function, 1);
2891 y = LLVMGetParam(function, 2);
2892 facing = LLVMGetParam(function, 3);
2893 a0_ptr = LLVMGetParam(function, 4);
2894 dadx_ptr = LLVMGetParam(function, 5);
2895 dady_ptr = LLVMGetParam(function, 6);
2896 color_ptr_ptr = LLVMGetParam(function, 7);
2897 depth_ptr = LLVMGetParam(function, 8);
2898 mask_input = LLVMGetParam(function, 9);
2899 thread_data_ptr = LLVMGetParam(function, 10);
2900 stride_ptr = LLVMGetParam(function, 11);
2901 depth_stride = LLVMGetParam(function, 12);
2902 color_sample_stride_ptr = LLVMGetParam(function, 13);
2903 depth_sample_stride = LLVMGetParam(function, 14);
2904
2905 lp_build_name(context_ptr, "context");
2906 lp_build_name(x, "x");
2907 lp_build_name(y, "y");
2908 lp_build_name(a0_ptr, "a0");
2909 lp_build_name(dadx_ptr, "dadx");
2910 lp_build_name(dady_ptr, "dady");
2911 lp_build_name(color_ptr_ptr, "color_ptr_ptr");
2912 lp_build_name(depth_ptr, "depth");
2913 lp_build_name(mask_input, "mask_input");
2914 lp_build_name(thread_data_ptr, "thread_data");
2915 lp_build_name(stride_ptr, "stride_ptr");
2916 lp_build_name(depth_stride, "depth_stride");
2917 lp_build_name(color_sample_stride_ptr, "color_sample_stride_ptr");
2918 lp_build_name(depth_sample_stride, "depth_sample_stride");
2919
2920 /*
2921 * Function body
2922 */
2923
2924 block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
2925 builder = gallivm->builder;
2926 assert(builder);
2927 LLVMPositionBuilderAtEnd(builder, block);
2928
2929 /*
2930 * Must not count ps invocations if there's a null shader.
2931 * (It would be ok to count with null shader if there's d/s tests,
2932 * but only if there's d/s buffers too, which is different
2933 * to implicit rasterization disable which must not depend
2934 * on the d/s buffers.)
2935 * Could use popcount on mask, but pixel accuracy is not required.
2936 * Could disable if there's no stats query, but maybe not worth it.
2937 */
2938 if (shader->info.base.num_instructions > 1) {
2939 LLVMValueRef invocs, val;
2940 invocs = lp_jit_thread_data_invocations(gallivm, thread_data_ptr);
2941 val = LLVMBuildLoad(builder, invocs, "");
2942 val = LLVMBuildAdd(builder, val,
2943 LLVMConstInt(LLVMInt64TypeInContext(gallivm->context), 1, 0),
2944 "invoc_count");
2945 LLVMBuildStore(builder, val, invocs);
2946 }
2947
2948 /* code generated texture sampling */
2949 sampler = lp_llvm_sampler_soa_create(key->samplers, key->nr_samplers);
2950 image = lp_llvm_image_soa_create(lp_fs_variant_key_images(key), key->nr_images);
2951
2952 num_fs = 16 / fs_type.length; /* number of loops per 4x4 stamp */
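   /* e.g. 4 loops with 4-wide (128-bit) vectors, 2 loops with 8-wide (AVX) vectors */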
2953 /* for 1d resources only run "upper half" of stamp */
2954 if (key->resource_1d)
2955 num_fs /= 2;
2956
2957 {
2958 LLVMValueRef num_loop = lp_build_const_int32(gallivm, num_fs);
2959 LLVMTypeRef mask_type = lp_build_int_vec_type(gallivm, fs_type);
2960 LLVMValueRef num_loop_samp = lp_build_const_int32(gallivm, num_fs * key->coverage_samples);
2961 LLVMValueRef mask_store = lp_build_array_alloca(gallivm, mask_type,
2962 num_loop_samp, "mask_store");
2963
2964 LLVMTypeRef flt_type = LLVMFloatTypeInContext(gallivm->context);
2965 LLVMValueRef glob_sample_pos = LLVMAddGlobal(gallivm->module, LLVMArrayType(flt_type, key->coverage_samples * 2), "");
2966 LLVMValueRef sample_pos_array;
2967
2968 if (key->multisample && key->coverage_samples == 4) {
2969 LLVMValueRef sample_pos_arr[8];
2970 for (unsigned i = 0; i < 4; i++) {
2971 sample_pos_arr[i * 2] = LLVMConstReal(flt_type, lp_sample_pos_4x[i][0]);
2972 sample_pos_arr[i * 2 + 1] = LLVMConstReal(flt_type, lp_sample_pos_4x[i][1]);
2973 }
2974 sample_pos_array = LLVMConstArray(LLVMFloatTypeInContext(gallivm->context), sample_pos_arr, 8);
2975 } else {
2976 LLVMValueRef sample_pos_arr[2];
2977 sample_pos_arr[0] = LLVMConstReal(flt_type, 0.5);
2978 sample_pos_arr[1] = LLVMConstReal(flt_type, 0.5);
2979 sample_pos_array = LLVMConstArray(LLVMFloatTypeInContext(gallivm->context), sample_pos_arr, 2);
2980 }
2981 LLVMSetInitializer(glob_sample_pos, sample_pos_array);
2982
2983 LLVMValueRef color_store[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS];
2984 boolean pixel_center_integer =
2985 shader->info.base.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER];
2986
2987 /*
2988       * The shader input interpolation info is not explicitly baked into the
2989 * shader key, but everything it derives from (TGSI, and flatshade) is
2990 * already included in the shader key.
2991 */
2992 lp_build_interp_soa_init(&interp,
2993 gallivm,
2994 shader->info.base.num_inputs,
2995 inputs,
2996 pixel_center_integer,
2997 key->coverage_samples, glob_sample_pos,
2998 num_loop,
2999 key->depth_clamp,
3000 builder, fs_type,
3001 a0_ptr, dadx_ptr, dady_ptr,
3002 x, y);
3003
3004 for (i = 0; i < num_fs; i++) {
3005 if (key->multisample) {
3006 LLVMValueRef smask_val = LLVMBuildLoad(builder, lp_jit_context_sample_mask(gallivm, context_ptr), "");
3007
3008 /*
3009 * For multisampling, extract the per-sample mask from the incoming 64-bit mask,
3010 * store to the per sample mask storage. Or all of them together to generate
3011              * store it to the per-sample mask storage, and OR all of them together to generate
3012 * Take the incoming state coverage mask into account.
3013 */
3014 for (unsigned s = 0; s < key->coverage_samples; s++) {
3015 LLVMValueRef sindexi = lp_build_const_int32(gallivm, i + (s * num_fs));
3016 LLVMValueRef sample_mask_ptr = LLVMBuildGEP(builder, mask_store,
3017 &sindexi, 1, "sample_mask_ptr");
3018 LLVMValueRef s_mask = generate_quad_mask(gallivm, fs_type,
3019 i*fs_type.length/4, s, mask_input);
3020
3021 LLVMValueRef smask_bit = LLVMBuildAnd(builder, smask_val, lp_build_const_int32(gallivm, (1 << s)), "");
3022 LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntNE, smask_bit, lp_build_const_int32(gallivm, 0), "");
3023 smask_bit = LLVMBuildSExt(builder, cmp, int32_type, "");
3024 smask_bit = lp_build_broadcast(gallivm, mask_type, smask_bit);
3025
3026 s_mask = LLVMBuildAnd(builder, s_mask, smask_bit, "");
3027 LLVMBuildStore(builder, s_mask, sample_mask_ptr);
3028 }
3029 } else {
3030 LLVMValueRef mask;
3031 LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
3032 LLVMValueRef mask_ptr = LLVMBuildGEP(builder, mask_store,
3033 &indexi, 1, "mask_ptr");
3034
3035 if (partial_mask) {
3036 mask = generate_quad_mask(gallivm, fs_type,
3037 i*fs_type.length/4, 0, mask_input);
3038 }
3039 else {
3040 mask = lp_build_const_int_vec(gallivm, fs_type, ~0);
3041 }
3042 LLVMBuildStore(builder, mask, mask_ptr);
3043 }
3044 }
3045
3046 generate_fs_loop(gallivm,
3047 shader, key,
3048 builder,
3049 fs_type,
3050 context_ptr,
3051 glob_sample_pos,
3052 num_loop,
3053 &interp,
3054 sampler,
3055 image,
3056 mask_store, /* output */
3057 color_store,
3058 depth_ptr,
3059 depth_stride,
3060 depth_sample_stride,
3061 facing,
3062 thread_data_ptr);
3063
3064 for (i = 0; i < num_fs; i++) {
3065 LLVMValueRef ptr;
3066 for (unsigned s = 0; s < key->coverage_samples; s++) {
3067 int idx = (i + (s * num_fs));
3068 LLVMValueRef sindexi = lp_build_const_int32(gallivm, idx);
3069 ptr = LLVMBuildGEP(builder, mask_store, &sindexi, 1, "");
3070
3071 fs_mask[idx] = LLVMBuildLoad(builder, ptr, "smask");
3072 }
3073
3074 for (unsigned s = 0; s < key->min_samples; s++) {
3075             /* This is fucked up, we need to reorganize things here */
3076 int idx = s * num_fs + i;
3077 LLVMValueRef sindexi = lp_build_const_int32(gallivm, idx);
3078 for (cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
3079 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
3080 ptr = LLVMBuildGEP(builder,
3081 color_store[cbuf * !cbuf0_write_all][chan],
3082 &sindexi, 1, "");
3083 fs_out_color[s][cbuf][chan][i] = ptr;
3084 }
3085 }
3086 if (dual_source_blend) {
3087 /* only support one dual source blend target hence always use output 1 */
3088 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
3089 ptr = LLVMBuildGEP(builder,
3090 color_store[1][chan],
3091 &sindexi, 1, "");
3092 fs_out_color[s][1][chan][i] = ptr;
3093 }
3094 }
3095 }
3096 }
3097 }
3098
3099 sampler->destroy(sampler);
3100 image->destroy(image);
3101 /* Loop over color outputs / color buffers to do blending.
3102 */
3103 for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
3104 if (key->cbuf_format[cbuf] != PIPE_FORMAT_NONE) {
3105 LLVMValueRef color_ptr;
3106 LLVMValueRef stride;
3107 LLVMValueRef sample_stride = NULL;
3108 LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);
3109
3110 boolean do_branch = ((key->depth.enabled
3111 || key->stencil[0].enabled
3112 || key->alpha.enabled)
3113 && !shader->info.base.uses_kill);
3114
3115 color_ptr = LLVMBuildLoad(builder,
3116 LLVMBuildGEP(builder, color_ptr_ptr,
3117 &index, 1, ""),
3118 "");
3119
3120 stride = LLVMBuildLoad(builder,
3121 LLVMBuildGEP(builder, stride_ptr, &index, 1, ""),
3122 "");
3123
3124 if (key->multisample)
3125 sample_stride = LLVMBuildLoad(builder,
3126 LLVMBuildGEP(builder, color_sample_stride_ptr,
3127 &index, 1, ""), "");
3128
3129 for (unsigned s = 0; s < key->cbuf_nr_samples[cbuf]; s++) {
3130 unsigned mask_idx = num_fs * (key->multisample ? s : 0);
3131 unsigned out_idx = key->min_samples == 1 ? 0 : s;
3132             LLVMValueRef out_ptr = color_ptr;
3133
3134 if (key->multisample) {
3135 LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_stride, lp_build_const_int32(gallivm, s), "");
3136 out_ptr = LLVMBuildGEP(builder, out_ptr, &sample_offset, 1, "");
3137 }
3138 out_ptr = LLVMBuildBitCast(builder, out_ptr, LLVMPointerType(blend_vec_type, 0), "");
3139
3140 lp_build_name(out_ptr, "color_ptr%d", cbuf);
3141
3142 generate_unswizzled_blend(gallivm, cbuf, variant,
3143 key->cbuf_format[cbuf],
3144 num_fs, fs_type, &fs_mask[mask_idx], fs_out_color[out_idx],
3145 context_ptr, out_ptr, stride,
3146 partial_mask, do_branch);
3147 }
3148 }
3149 }
3150
3151 LLVMBuildRetVoid(builder);
3152
3153 gallivm_verify_function(gallivm, function);
3154 }
3155
3156
3157 static void
3158 dump_fs_variant_key(struct lp_fragment_shader_variant_key *key)
3159 {
3160 unsigned i;
3161
3162 debug_printf("fs variant %p:\n", (void *) key);
3163
3164 if (key->flatshade) {
3165 debug_printf("flatshade = 1\n");
3166 }
3167 if (key->multisample) {
3168 debug_printf("multisample = 1\n");
3169 debug_printf("coverage samples = %d\n", key->coverage_samples);
3170 debug_printf("min samples = %d\n", key->min_samples);
3171 }
3172 for (i = 0; i < key->nr_cbufs; ++i) {
3173 debug_printf("cbuf_format[%u] = %s\n", i, util_format_name(key->cbuf_format[i]));
3174 debug_printf("cbuf nr_samples[%u] = %d\n", i, key->cbuf_nr_samples[i]);
3175 }
3176 if (key->depth.enabled || key->stencil[0].enabled) {
3177 debug_printf("depth.format = %s\n", util_format_name(key->zsbuf_format));
3178 debug_printf("depth nr_samples = %d\n", key->zsbuf_nr_samples);
3179 }
3180 if (key->depth.enabled) {
3181 debug_printf("depth.func = %s\n", util_str_func(key->depth.func, TRUE));
3182 debug_printf("depth.writemask = %u\n", key->depth.writemask);
3183 }
3184
3185 for (i = 0; i < 2; ++i) {
3186 if (key->stencil[i].enabled) {
3187 debug_printf("stencil[%u].func = %s\n", i, util_str_func(key->stencil[i].func, TRUE));
3188 debug_printf("stencil[%u].fail_op = %s\n", i, util_str_stencil_op(key->stencil[i].fail_op, TRUE));
3189 debug_printf("stencil[%u].zpass_op = %s\n", i, util_str_stencil_op(key->stencil[i].zpass_op, TRUE));
3190 debug_printf("stencil[%u].zfail_op = %s\n", i, util_str_stencil_op(key->stencil[i].zfail_op, TRUE));
3191 debug_printf("stencil[%u].valuemask = 0x%x\n", i, key->stencil[i].valuemask);
3192 debug_printf("stencil[%u].writemask = 0x%x\n", i, key->stencil[i].writemask);
3193 }
3194 }
3195
3196 if (key->alpha.enabled) {
3197 debug_printf("alpha.func = %s\n", util_str_func(key->alpha.func, TRUE));
3198 }
3199
3200 if (key->occlusion_count) {
3201 debug_printf("occlusion_count = 1\n");
3202 }
3203
3204 if (key->blend.logicop_enable) {
3205 debug_printf("blend.logicop_func = %s\n", util_str_logicop(key->blend.logicop_func, TRUE));
3206 }
3207 else if (key->blend.rt[0].blend_enable) {
3208 debug_printf("blend.rgb_func = %s\n", util_str_blend_func (key->blend.rt[0].rgb_func, TRUE));
3209 debug_printf("blend.rgb_src_factor = %s\n", util_str_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
3210 debug_printf("blend.rgb_dst_factor = %s\n", util_str_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
3211 debug_printf("blend.alpha_func = %s\n", util_str_blend_func (key->blend.rt[0].alpha_func, TRUE));
3212 debug_printf("blend.alpha_src_factor = %s\n", util_str_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
3213 debug_printf("blend.alpha_dst_factor = %s\n", util_str_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
3214 }
3215 debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
3216 if (key->blend.alpha_to_coverage) {
3217 debug_printf("blend.alpha_to_coverage is enabled\n");
3218 }
3219 for (i = 0; i < key->nr_samplers; ++i) {
3220 const struct lp_static_sampler_state *sampler = &key->samplers[i].sampler_state;
3221 debug_printf("sampler[%u] = \n", i);
3222 debug_printf(" .wrap = %s %s %s\n",
3223 util_str_tex_wrap(sampler->wrap_s, TRUE),
3224 util_str_tex_wrap(sampler->wrap_t, TRUE),
3225 util_str_tex_wrap(sampler->wrap_r, TRUE));
3226 debug_printf(" .min_img_filter = %s\n",
3227 util_str_tex_filter(sampler->min_img_filter, TRUE));
3228 debug_printf(" .min_mip_filter = %s\n",
3229 util_str_tex_mipfilter(sampler->min_mip_filter, TRUE));
3230 debug_printf(" .mag_img_filter = %s\n",
3231 util_str_tex_filter(sampler->mag_img_filter, TRUE));
3232 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
3233 debug_printf(" .compare_func = %s\n", util_str_func(sampler->compare_func, TRUE));
3234 debug_printf(" .normalized_coords = %u\n", sampler->normalized_coords);
3235 debug_printf(" .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
3236 debug_printf(" .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
3237 debug_printf(" .apply_min_lod = %u\n", sampler->apply_min_lod);
3238 debug_printf(" .apply_max_lod = %u\n", sampler->apply_max_lod);
3239 }
3240 for (i = 0; i < key->nr_sampler_views; ++i) {
3241 const struct lp_static_texture_state *texture = &key->samplers[i].texture_state;
3242 debug_printf("texture[%u] = \n", i);
3243 debug_printf(" .format = %s\n",
3244 util_format_name(texture->format));
3245 debug_printf(" .target = %s\n",
3246 util_str_tex_target(texture->target, TRUE));
3247 debug_printf(" .level_zero_only = %u\n",
3248 texture->level_zero_only);
3249 debug_printf(" .pot = %u %u %u\n",
3250 texture->pot_width,
3251 texture->pot_height,
3252 texture->pot_depth);
3253 }
3254 struct lp_image_static_state *images = lp_fs_variant_key_images(key);
3255 for (i = 0; i < key->nr_images; ++i) {
3256 const struct lp_static_texture_state *image = &images[i].image_state;
3257 debug_printf("image[%u] = \n", i);
3258 debug_printf(" .format = %s\n",
3259 util_format_name(image->format));
3260 debug_printf(" .target = %s\n",
3261 util_str_tex_target(image->target, TRUE));
3262 debug_printf(" .level_zero_only = %u\n",
3263 image->level_zero_only);
3264 debug_printf(" .pot = %u %u %u\n",
3265 image->pot_width,
3266 image->pot_height,
3267 image->pot_depth);
3268 }
3269 }
3270
3271
3272 void
3273 lp_debug_fs_variant(struct lp_fragment_shader_variant *variant)
3274 {
3275 debug_printf("llvmpipe: Fragment shader #%u variant #%u:\n",
3276 variant->shader->no, variant->no);
3277 if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
3278 tgsi_dump(variant->shader->base.tokens, 0);
3279 else
3280 nir_print_shader(variant->shader->base.ir.nir, stderr);
3281 dump_fs_variant_key(&variant->key);
3282 debug_printf("variant->opaque = %u\n", variant->opaque);
3283 debug_printf("\n");
3284 }
3285
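/**
 * Compute the disk-cache key for a NIR shader variant: hash the variant key
 * together with the serialized NIR to produce the SHA-1 used to look up and
 * store the compiled code in the on-disk shader cache.
 */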
3286 static void
3287 lp_fs_get_ir_cache_key(struct lp_fragment_shader_variant *variant,
3288 unsigned char ir_sha1_cache_key[20])
3289 {
3290 struct blob blob = { 0 };
3291 unsigned ir_size;
3292 void *ir_binary;
3293
3294 blob_init(&blob);
3295 nir_serialize(&blob, variant->shader->base.ir.nir, true);
3296 ir_binary = blob.data;
3297 ir_size = blob.size;
3298
3299 struct mesa_sha1 ctx;
3300 _mesa_sha1_init(&ctx);
3301 _mesa_sha1_update(&ctx, &variant->key, variant->shader->variant_key_size);
3302 _mesa_sha1_update(&ctx, ir_binary, ir_size);
3303 _mesa_sha1_final(&ctx, ir_sha1_cache_key);
3304
3305 blob_finish(&blob);
3306 }
3307
3308 /**
3309 * Generate a new fragment shader variant from the shader code and
3310 * other state indicated by the key.
3311 */
3312 static struct lp_fragment_shader_variant *
3313 generate_variant(struct llvmpipe_context *lp,
3314 struct lp_fragment_shader *shader,
3315 const struct lp_fragment_shader_variant_key *key)
3316 {
3317 struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
3318 struct lp_fragment_shader_variant *variant;
3319 const struct util_format_description *cbuf0_format_desc = NULL;
3320 boolean fullcolormask;
3321 char module_name[64];
3322 unsigned char ir_sha1_cache_key[20];
3323 struct lp_cached_code cached = { 0 };
3324 bool needs_caching = false;
3325 variant = MALLOC(sizeof *variant + shader->variant_key_size - sizeof variant->key);
3326 if (!variant)
3327 return NULL;
3328
3329 memset(variant, 0, sizeof(*variant));
3330 snprintf(module_name, sizeof(module_name), "fs%u_variant%u",
3331 shader->no, shader->variants_created);
3332
3333 variant->shader = shader;
3334 memcpy(&variant->key, key, shader->variant_key_size);
3335
3336 if (shader->base.ir.nir) {
3337 lp_fs_get_ir_cache_key(variant, ir_sha1_cache_key);
3338
3339 lp_disk_cache_find_shader(screen, &cached, ir_sha1_cache_key);
3340 if (!cached.data_size)
3341 needs_caching = true;
3342 }
3343 variant->gallivm = gallivm_create(module_name, lp->context, &cached);
3344 if (!variant->gallivm) {
3345 FREE(variant);
3346 return NULL;
3347 }
3348
3349 variant->list_item_global.base = variant;
3350 variant->list_item_local.base = variant;
3351 variant->no = shader->variants_created++;
3352
3353
3354
3355 /*
3356 * Determine whether we are touching all channels in the color buffer.
3357 */
3358 fullcolormask = FALSE;
3359 if (key->nr_cbufs == 1) {
3360 cbuf0_format_desc = util_format_description(key->cbuf_format[0]);
3361 fullcolormask = util_format_colormask_full(cbuf0_format_desc, key->blend.rt[0].colormask);
3362 }
3363
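   /*
    * A variant is "opaque" when it unconditionally overwrites every channel
    * of the (single) color buffer: no logicop, blending, stencil, alpha or
    * depth test, multisampling, alpha-to-coverage, kill or sample mask
    * writes.  Such a variant never needs to read the destination.
    */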
3364 variant->opaque =
3365 !key->blend.logicop_enable &&
3366 !key->blend.rt[0].blend_enable &&
3367 fullcolormask &&
3368 !key->stencil[0].enabled &&
3369 !key->alpha.enabled &&
3370 !key->multisample &&
3371 !key->blend.alpha_to_coverage &&
3372 !key->depth.enabled &&
3373 !shader->info.base.uses_kill &&
3374 !shader->info.base.writes_samplemask
3375 ? TRUE : FALSE;
3376
3377 if ((LP_DEBUG & DEBUG_FS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
3378 lp_debug_fs_variant(variant);
3379 }
3380
3381 lp_jit_init_types(variant);
3382
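   /*
    * Two entry points may be generated: RAST_EDGE_TEST is always built,
    * while RAST_WHOLE is only built for opaque variants; non-opaque
    * variants reuse the edge-test function for whole tiles (see the
    * fallback after compilation below).
    */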
3383 if (variant->jit_function[RAST_EDGE_TEST] == NULL)
3384 generate_fragment(lp, shader, variant, RAST_EDGE_TEST);
3385
3386 if (variant->jit_function[RAST_WHOLE] == NULL) {
3387 if (variant->opaque) {
3388 /* Specialized shader, which doesn't need to read the color buffer. */
3389 generate_fragment(lp, shader, variant, RAST_WHOLE);
3390 }
3391 }
3392
3393 /*
3394 * Compile everything
3395 */
3396
3397 gallivm_compile_module(variant->gallivm);
3398
3399 variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);
3400
3401 if (variant->function[RAST_EDGE_TEST]) {
3402 variant->jit_function[RAST_EDGE_TEST] = (lp_jit_frag_func)
3403 gallivm_jit_function(variant->gallivm,
3404 variant->function[RAST_EDGE_TEST]);
3405 }
3406
3407 if (variant->function[RAST_WHOLE]) {
3408 variant->jit_function[RAST_WHOLE] = (lp_jit_frag_func)
3409 gallivm_jit_function(variant->gallivm,
3410 variant->function[RAST_WHOLE]);
3411 } else if (!variant->jit_function[RAST_WHOLE]) {
3412 variant->jit_function[RAST_WHOLE] = variant->jit_function[RAST_EDGE_TEST];
3413 }
3414
3415 if (needs_caching) {
3416 lp_disk_cache_insert_shader(screen, &cached, ir_sha1_cache_key);
3417 }
3418
3419 gallivm_free_ir(variant->gallivm);
3420
3421 return variant;
3422 }
3423
3424
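/**
 * Create a new fragment shader (pipe_context::create_fs_state).  Gathers the
 * TGSI/NIR summary info, creates the draw-module counterpart and records the
 * per-input interpolation setup; variants are only generated later, on use.
 */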
3425 static void *
3426 llvmpipe_create_fs_state(struct pipe_context *pipe,
3427 const struct pipe_shader_state *templ)
3428 {
3429 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3430 struct lp_fragment_shader *shader;
3431 int nr_samplers;
3432 int nr_sampler_views;
3433 int nr_images;
3434 int i;
3435
3436 shader = CALLOC_STRUCT(lp_fragment_shader);
3437 if (!shader)
3438 return NULL;
3439
3440 shader->no = fs_no++;
3441 make_empty_list(&shader->variants);
3442
3443 shader->base.type = templ->type;
3444 if (templ->type == PIPE_SHADER_IR_TGSI) {
3445 /* get/save the summary info for this shader */
3446 lp_build_tgsi_info(templ->tokens, &shader->info);
3447
3448 /* we need to keep a local copy of the tokens */
3449 shader->base.tokens = tgsi_dup_tokens(templ->tokens);
3450 } else {
3451 shader->base.ir.nir = templ->ir.nir;
3452 nir_tgsi_scan_shader(templ->ir.nir, &shader->info.base, true);
3453 }
3454
3455 shader->draw_data = draw_create_fragment_shader(llvmpipe->draw, templ);
3456 if (shader->draw_data == NULL) {
3457 FREE((void *) shader->base.tokens);
3458 FREE(shader);
3459 return NULL;
3460 }
3461
3462 nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
3463 nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
3464 nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
3465 shader->variant_key_size = lp_fs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);
3466
3467 for (i = 0; i < shader->info.base.num_inputs; i++) {
3468 shader->inputs[i].usage_mask = shader->info.base.input_usage_mask[i];
3469 shader->inputs[i].cyl_wrap = shader->info.base.input_cylindrical_wrap[i];
3470 shader->inputs[i].location = shader->info.base.input_interpolate_loc[i];
3471
3472 switch (shader->info.base.input_interpolate[i]) {
3473 case TGSI_INTERPOLATE_CONSTANT:
3474 shader->inputs[i].interp = LP_INTERP_CONSTANT;
3475 break;
3476 case TGSI_INTERPOLATE_LINEAR:
3477 shader->inputs[i].interp = LP_INTERP_LINEAR;
3478 break;
3479 case TGSI_INTERPOLATE_PERSPECTIVE:
3480 shader->inputs[i].interp = LP_INTERP_PERSPECTIVE;
3481 break;
3482 case TGSI_INTERPOLATE_COLOR:
3483 shader->inputs[i].interp = LP_INTERP_COLOR;
3484 break;
3485 default:
3486 assert(0);
3487 break;
3488 }
3489
3490 switch (shader->info.base.input_semantic_name[i]) {
3491 case TGSI_SEMANTIC_FACE:
3492 shader->inputs[i].interp = LP_INTERP_FACING;
3493 break;
3494 case TGSI_SEMANTIC_POSITION:
3495 /* Position was already emitted above
3496 */
3497 shader->inputs[i].interp = LP_INTERP_POSITION;
3498 shader->inputs[i].src_index = 0;
3499 continue;
3500 }
3501
3502 /* XXX this is a completely pointless index map... */
3503 shader->inputs[i].src_index = i+1;
3504 }
3505
3506 if (LP_DEBUG & DEBUG_TGSI) {
3507 unsigned attrib;
3508 debug_printf("llvmpipe: Create fragment shader #%u %p:\n",
3509 shader->no, (void *) shader);
3510 tgsi_dump(templ->tokens, 0);
3511 debug_printf("usage masks:\n");
3512 for (attrib = 0; attrib < shader->info.base.num_inputs; ++attrib) {
3513 unsigned usage_mask = shader->info.base.input_usage_mask[attrib];
3514 debug_printf(" IN[%u].%s%s%s%s\n",
3515 attrib,
3516 usage_mask & TGSI_WRITEMASK_X ? "x" : "",
3517 usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
3518 usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
3519 usage_mask & TGSI_WRITEMASK_W ? "w" : "");
3520 }
3521 debug_printf("\n");
3522 }
3523
3524 return shader;
3525 }
3526
3527
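/**
 * Bind (or unbind, with fs == NULL) a fragment shader
 * (pipe_context::bind_fs_state) and flag the fragment shader state as dirty;
 * variant selection happens later in llvmpipe_update_fs().
 */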
3528 static void
3529 llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
3530 {
3531 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3532 struct lp_fragment_shader *lp_fs = (struct lp_fragment_shader *)fs;
3533 if (llvmpipe->fs == lp_fs)
3534 return;
3535
3536 draw_bind_fragment_shader(llvmpipe->draw,
3537 (lp_fs ? lp_fs->draw_data : NULL));
3538
3539 llvmpipe->fs = lp_fs;
3540
3541 llvmpipe->dirty |= LP_NEW_FS;
3542 }
3543
3544
3545 /**
3546 * Remove shader variant from two lists: the shader's variant list
3547 * and the context's variant list.
3548 */
3549 static void
3550 llvmpipe_remove_shader_variant(struct llvmpipe_context *lp,
3551 struct lp_fragment_shader_variant *variant)
3552 {
3553 if ((LP_DEBUG & DEBUG_FS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
3554 debug_printf("llvmpipe: del fs #%u var %u v created %u v cached %u "
3555 "v total cached %u inst %u total inst %u\n",
3556 variant->shader->no, variant->no,
3557 variant->shader->variants_created,
3558 variant->shader->variants_cached,
3559 lp->nr_fs_variants, variant->nr_instrs, lp->nr_fs_instrs);
3560 }
3561
3562 gallivm_destroy(variant->gallivm);
3563
3564 /* remove from shader's list */
3565 remove_from_list(&variant->list_item_local);
3566 variant->shader->variants_cached--;
3567
3568 /* remove from context's list */
3569 remove_from_list(&variant->list_item_global);
3570 lp->nr_fs_variants--;
3571 lp->nr_fs_instrs -= variant->nr_instrs;
3572
3573 FREE(variant);
3574 }
3575
3576
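/**
 * Destroy a fragment shader (pipe_context::delete_fs_state): wait for any
 * pending rendering, free every variant, the draw-module data and the
 * TGSI/NIR copy.
 */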
3577 static void
3578 llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
3579 {
3580 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3581 struct lp_fragment_shader *shader = fs;
3582 struct lp_fs_variant_list_item *li;
3583
3584 assert(fs != llvmpipe->fs);
3585
3586 /*
3587 * XXX: we need to flush the context until we have some sort of reference
3588     * counting in fragment shaders, as they may still be binned.
3589     * Flushing alone might not be sufficient; we need to wait on it too.
3590 */
3591 llvmpipe_finish(pipe, __FUNCTION__);
3592
3593 /* Delete all the variants */
3594 li = first_elem(&shader->variants);
3595 while(!at_end(&shader->variants, li)) {
3596 struct lp_fs_variant_list_item *next = next_elem(li);
3597 llvmpipe_remove_shader_variant(llvmpipe, li->base);
3598 li = next;
3599 }
3600
3601 /* Delete draw module's data */
3602 draw_delete_fragment_shader(llvmpipe->draw, shader->draw_data);
3603
3604 if (shader->base.ir.nir)
3605 ralloc_free(shader->base.ir.nir);
3606 assert(shader->variants_cached == 0);
3607 FREE((void *) shader->base.tokens);
3608 FREE(shader);
3609 }
3610
3611
3612
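/**
 * Set a constant buffer (pipe_context::set_constant_buffer).  For the
 * vertex/geometry/tessellation stages the buffer contents are mapped and
 * passed to the draw module; for fragment and compute shaders only the
 * corresponding dirty flags are set.
 */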
3613 static void
3614 llvmpipe_set_constant_buffer(struct pipe_context *pipe,
3615 enum pipe_shader_type shader, uint index,
3616 const struct pipe_constant_buffer *cb)
3617 {
3618 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3619 struct pipe_resource *constants = cb ? cb->buffer : NULL;
3620
3621 assert(shader < PIPE_SHADER_TYPES);
3622 assert(index < ARRAY_SIZE(llvmpipe->constants[shader]));
3623
3624 /* note: reference counting */
3625 util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb);
3626
3627 if (constants) {
3628 if (!(constants->bind & PIPE_BIND_CONSTANT_BUFFER)) {
3629 debug_printf("Illegal set constant without bind flag\n");
3630 constants->bind |= PIPE_BIND_CONSTANT_BUFFER;
3631 }
3632 }
3633
3634 if (shader == PIPE_SHADER_VERTEX ||
3635 shader == PIPE_SHADER_GEOMETRY ||
3636 shader == PIPE_SHADER_TESS_CTRL ||
3637 shader == PIPE_SHADER_TESS_EVAL) {
3638 /* Pass the constants to the 'draw' module */
3639 const unsigned size = cb ? cb->buffer_size : 0;
3640 const ubyte *data;
3641
3642 if (constants) {
3643 data = (ubyte *) llvmpipe_resource_data(constants);
3644 }
3645 else if (cb && cb->user_buffer) {
3646 data = (ubyte *) cb->user_buffer;
3647 }
3648 else {
3649 data = NULL;
3650 }
3651
3652 if (data)
3653 data += cb->buffer_offset;
3654
3655 draw_set_mapped_constant_buffer(llvmpipe->draw, shader,
3656 index, data, size);
3657 }
3658 else if (shader == PIPE_SHADER_COMPUTE)
3659 llvmpipe->cs_dirty |= LP_CSNEW_CONSTANTS;
3660 else
3661 llvmpipe->dirty |= LP_NEW_FS_CONSTANTS;
3662
3663 if (cb && cb->user_buffer) {
3664 pipe_resource_reference(&constants, NULL);
3665 }
3666 }
3667
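/**
 * Bind shader storage buffers (pipe_context::set_shader_buffers).  As with
 * constant buffers, SSBOs for the vertex/geometry/tessellation stages are
 * mapped for the draw module, while fragment and compute SSBOs just dirty
 * the respective state.
 */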
3668 static void
3669 llvmpipe_set_shader_buffers(struct pipe_context *pipe,
3670 enum pipe_shader_type shader, unsigned start_slot,
3671 unsigned count, const struct pipe_shader_buffer *buffers,
3672 unsigned writable_bitmask)
3673 {
3674 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3675 unsigned i, idx;
3676 for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
3677 const struct pipe_shader_buffer *buffer = buffers ? &buffers[idx] : NULL;
3678
3679 util_copy_shader_buffer(&llvmpipe->ssbos[shader][i], buffer);
3680
3681 if (shader == PIPE_SHADER_VERTEX ||
3682 shader == PIPE_SHADER_GEOMETRY ||
3683 shader == PIPE_SHADER_TESS_CTRL ||
3684 shader == PIPE_SHADER_TESS_EVAL) {
3685 const unsigned size = buffer ? buffer->buffer_size : 0;
3686 const ubyte *data = NULL;
3687 if (buffer && buffer->buffer)
3688 data = (ubyte *) llvmpipe_resource_data(buffer->buffer);
3689 if (data)
3690 data += buffer->buffer_offset;
3691 draw_set_mapped_shader_buffer(llvmpipe->draw, shader,
3692 i, data, size);
3693 } else if (shader == PIPE_SHADER_COMPUTE) {
3694 llvmpipe->cs_dirty |= LP_CSNEW_SSBOS;
3695 } else if (shader == PIPE_SHADER_FRAGMENT) {
3696 llvmpipe->dirty |= LP_NEW_FS_SSBOS;
3697 }
3698 }
3699 }
3700
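/**
 * Bind shader images (pipe_context::set_shader_images).  The views are
 * copied locally; for the vertex/geometry/tessellation stages they are also
 * handed to the draw module, otherwise the fragment or compute dirty flag
 * is set.
 */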
3701 static void
3702 llvmpipe_set_shader_images(struct pipe_context *pipe,
3703 enum pipe_shader_type shader, unsigned start_slot,
3704 unsigned count, const struct pipe_image_view *images)
3705 {
3706 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3707 unsigned i, idx;
3708
3709 draw_flush(llvmpipe->draw);
3710 for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
3711 const struct pipe_image_view *image = images ? &images[idx] : NULL;
3712
3713 util_copy_image_view(&llvmpipe->images[shader][i], image);
3714 }
3715
3716 llvmpipe->num_images[shader] = start_slot + count;
3717 if (shader == PIPE_SHADER_VERTEX ||
3718 shader == PIPE_SHADER_GEOMETRY ||
3719 shader == PIPE_SHADER_TESS_CTRL ||
3720 shader == PIPE_SHADER_TESS_EVAL) {
3721 draw_set_images(llvmpipe->draw,
3722 shader,
3723 llvmpipe->images[shader],
3724 start_slot + count);
3725 } else if (shader == PIPE_SHADER_COMPUTE)
3726 llvmpipe->cs_dirty |= LP_CSNEW_IMAGES;
3727 else
3728 llvmpipe->dirty |= LP_NEW_FS_IMAGES;
3729 }
3730
3731 /**
3732 * Return the blend factor equivalent to a destination alpha of one.
3733 */
3734 static inline unsigned
3735 force_dst_alpha_one(unsigned factor, boolean clamped_zero)
3736 {
3737 switch(factor) {
3738 case PIPE_BLENDFACTOR_DST_ALPHA:
3739 return PIPE_BLENDFACTOR_ONE;
3740 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
3741 return PIPE_BLENDFACTOR_ZERO;
3742 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
3743 if (clamped_zero)
3744 return PIPE_BLENDFACTOR_ZERO;
3745 else
3746 return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
3747 }
3748
3749 return factor;
3750 }
3751
3752
3753 /**
3754 * We need to generate several variants of the fragment pipeline to match
3755 * all the combinations of the contributing state atoms.
3756 *
3757 * TODO: there is actually no reason to tie this to context state -- the
3758 * generated code could be cached globally in the screen.
3759 */
3760 static struct lp_fragment_shader_variant_key *
3761 make_variant_key(struct llvmpipe_context *lp,
3762 struct lp_fragment_shader *shader,
3763 char *store)
3764 {
3765 unsigned i;
3766 struct lp_fragment_shader_variant_key *key;
3767
3768 key = (struct lp_fragment_shader_variant_key *)store;
3769
3770 memset(key, 0, offsetof(struct lp_fragment_shader_variant_key, samplers[1]));
3771
3772 if (lp->framebuffer.zsbuf) {
3773 enum pipe_format zsbuf_format = lp->framebuffer.zsbuf->format;
3774 const struct util_format_description *zsbuf_desc =
3775 util_format_description(zsbuf_format);
3776
3777 if (lp->depth_stencil->depth.enabled &&
3778 util_format_has_depth(zsbuf_desc)) {
3779 key->zsbuf_format = zsbuf_format;
3780 memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
3781 }
3782 if (lp->depth_stencil->stencil[0].enabled &&
3783 util_format_has_stencil(zsbuf_desc)) {
3784 key->zsbuf_format = zsbuf_format;
3785 memcpy(&key->stencil, &lp->depth_stencil->stencil, sizeof key->stencil);
3786 }
3787 if (llvmpipe_resource_is_1d(lp->framebuffer.zsbuf->texture)) {
3788 key->resource_1d = TRUE;
3789 }
3790 key->zsbuf_nr_samples = util_res_sample_count(lp->framebuffer.zsbuf->texture);
3791 }
3792
3793 /*
3794 * Propagate the depth clamp setting from the rasterizer state.
3795 * depth_clip == 0 implies depth clamping is enabled.
3796 *
3797     * When clip_halfz is enabled, always clamp the depth values.
3798     *
3799     * XXX: This is incorrect for GL, but correct for d3d10 (depth
3800     * clamp is always active in d3d10, regardless of whether depth
3801     * clip is enabled or not).
3802 * (GL has an always-on [0,1] clamp on fs depth output instead
3803 * to ensure the depth values stay in range. Doesn't look like
3804 * we do that, though...)
3805 */
3806 if (lp->rasterizer->clip_halfz) {
3807 key->depth_clamp = 1;
3808 } else {
3809 key->depth_clamp = (lp->rasterizer->depth_clip_near == 0) ? 1 : 0;
3810 }
3811
3812 /* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
3813 if (!lp->framebuffer.nr_cbufs ||
3814 !lp->framebuffer.cbufs[0] ||
3815 !util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
3816 key->alpha.enabled = lp->depth_stencil->alpha.enabled;
3817 }
3818 if(key->alpha.enabled)
3819 key->alpha.func = lp->depth_stencil->alpha.func;
3820 /* alpha.ref_value is passed in jit_context */
3821
3822 key->flatshade = lp->rasterizer->flatshade;
3823 key->multisample = lp->rasterizer->multisample;
3824 if (lp->active_occlusion_queries && !lp->queries_disabled) {
3825 key->occlusion_count = TRUE;
3826 }
3827
3828 if (lp->framebuffer.nr_cbufs) {
3829 memcpy(&key->blend, lp->blend, sizeof key->blend);
3830 }
3831
3832 key->coverage_samples = 1;
3833 key->min_samples = 1;
3834 if (key->multisample) {
3835 key->coverage_samples = util_framebuffer_get_num_samples(&lp->framebuffer);
3836 key->min_samples = lp->min_samples == 1 ? 1 : key->coverage_samples;
3837 }
3838 key->nr_cbufs = lp->framebuffer.nr_cbufs;
3839
3840 if (!key->blend.independent_blend_enable) {
3841       /* we always need independent blend, otherwise the fixups below won't work */
3842 for (i = 1; i < key->nr_cbufs; i++) {
3843 memcpy(&key->blend.rt[i], &key->blend.rt[0], sizeof(key->blend.rt[0]));
3844 }
3845 key->blend.independent_blend_enable = 1;
3846 }
3847
3848 for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
3849 struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];
3850
3851 if (lp->framebuffer.cbufs[i]) {
3852 enum pipe_format format = lp->framebuffer.cbufs[i]->format;
3853 const struct util_format_description *format_desc;
3854
3855 key->cbuf_format[i] = format;
3856 key->cbuf_nr_samples[i] = util_res_sample_count(lp->framebuffer.cbufs[i]->texture);
3857
3858 /*
3859 * Figure out if this is a 1d resource. Note that OpenGL allows crazy
3860 * mixing of 2d textures with height 1 and 1d textures, so make sure
3861 * we pick 1d if any cbuf or zsbuf is 1d.
3862 */
3863 if (llvmpipe_resource_is_1d(lp->framebuffer.cbufs[i]->texture)) {
3864 key->resource_1d = TRUE;
3865 }
3866
3867 format_desc = util_format_description(format);
3868 assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
3869 format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
3870
3871 /*
3872 * Mask out color channels not present in the color buffer.
3873 */
3874 blend_rt->colormask &= util_format_colormask(format_desc);
3875
3876 /*
3877 * Disable blend for integer formats.
3878 */
3879 if (util_format_is_pure_integer(format)) {
3880 blend_rt->blend_enable = 0;
3881 }
3882
3883 /*
3884 * Our swizzled render tiles always have an alpha channel, but the
3885        * linear render target format often does not, so we force the dst
3886        * alpha to one here.
3887 *
3888 * This is not a mere optimization. Wrong results will be produced if
3889 * the dst alpha is used, the dst format does not have alpha, and the
3890 * previous rendering was not flushed from the swizzled to linear
3891 * buffer. For example, NonPowTwo DCT.
3892 *
3893 * TODO: This should be generalized to all channels for better
3894 * performance, but only alpha causes correctness issues.
3895 *
3896 * Also, force rgb/alpha func/factors match, to make AoS blending
3897 * easier.
3898 */
3899 if (format_desc->swizzle[3] > PIPE_SWIZZLE_W ||
3900 format_desc->swizzle[3] == format_desc->swizzle[0]) {
3901 /* Doesn't cover mixed snorm/unorm but can't render to them anyway */
3902 boolean clamped_zero = !util_format_is_float(format) &&
3903 !util_format_is_snorm(format);
3904 blend_rt->rgb_src_factor =
3905 force_dst_alpha_one(blend_rt->rgb_src_factor, clamped_zero);
3906 blend_rt->rgb_dst_factor =
3907 force_dst_alpha_one(blend_rt->rgb_dst_factor, clamped_zero);
3908 blend_rt->alpha_func = blend_rt->rgb_func;
3909 blend_rt->alpha_src_factor = blend_rt->rgb_src_factor;
3910 blend_rt->alpha_dst_factor = blend_rt->rgb_dst_factor;
3911 }
3912 }
3913 else {
3914 /* no color buffer for this fragment output */
3915 key->cbuf_format[i] = PIPE_FORMAT_NONE;
3916 key->cbuf_nr_samples[i] = 0;
3917 blend_rt->colormask = 0x0;
3918 blend_rt->blend_enable = 0;
3919 }
3920 }
3921
3922 /* This value will be the same for all the variants of a given shader:
3923 */
3924 key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
3925
3926 struct lp_sampler_static_state *fs_sampler;
3927
3928 fs_sampler = key->samplers;
3929
3930 memset(fs_sampler, 0, MAX2(key->nr_samplers, key->nr_sampler_views) * sizeof *fs_sampler);
3931
3932 for(i = 0; i < key->nr_samplers; ++i) {
3933 if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
3934 lp_sampler_static_sampler_state(&fs_sampler[i].sampler_state,
3935 lp->samplers[PIPE_SHADER_FRAGMENT][i]);
3936 }
3937 }
3938
3939 /*
3940 * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
3941 * are dx10-style? Can't really have mixed opcodes, at least not
3942 * if we want to skip the holes here (without rescanning tgsi).
3943 */
3944 if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
3945 key->nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
3946 for(i = 0; i < key->nr_sampler_views; ++i) {
3947 /*
3948 * Note sview may exceed what's representable by file_mask.
3949              * This will still work; the only downside is that views which are
3950              * not actually used may be included in the shader key.
3951 */
3952 if(shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
3953 lp_sampler_static_texture_state(&fs_sampler[i].texture_state,
3954 lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
3955 }
3956 }
3957 }
3958 else {
3959 key->nr_sampler_views = key->nr_samplers;
3960 for(i = 0; i < key->nr_sampler_views; ++i) {
3961 if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
3962 lp_sampler_static_texture_state(&fs_sampler[i].texture_state,
3963 lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
3964 }
3965 }
3966 }
3967
3968 struct lp_image_static_state *lp_image;
3969 lp_image = lp_fs_variant_key_images(key);
3970 key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
3971 for (i = 0; i < key->nr_images; ++i) {
3972 if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
3973 lp_sampler_static_texture_state_image(&lp_image[i].image_state,
3974 &lp->images[PIPE_SHADER_FRAGMENT][i]);
3975 }
3976 }
3977 return key;
3978 }
3979
3980
3981
3982 /**
3983 * Update fragment shader state. This is called just prior to drawing
3984 * something when some fragment-related state has changed.
3985 */
3986 void
3987 llvmpipe_update_fs(struct llvmpipe_context *lp)
3988 {
3989 struct lp_fragment_shader *shader = lp->fs;
3990 struct lp_fragment_shader_variant_key *key;
3991 struct lp_fragment_shader_variant *variant = NULL;
3992 struct lp_fs_variant_list_item *li;
3993 char store[LP_FS_MAX_VARIANT_KEY_SIZE];
3994
3995 key = make_variant_key(lp, shader, store);
3996
3997 /* Search the variants for one which matches the key */
3998 li = first_elem(&shader->variants);
3999 while(!at_end(&shader->variants, li)) {
4000 if(memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
4001 variant = li->base;
4002 break;
4003 }
4004 li = next_elem(li);
4005 }
4006
4007 if (variant) {
4008 /* Move this variant to the head of the list to implement LRU
4009        * deletion of shaders when we have too many.
4010 */
4011 move_to_head(&lp->fs_variants_list, &variant->list_item_global);
4012 }
4013 else {
4014 /* variant not found, create it now */
4015 int64_t t0, t1, dt;
4016 unsigned i;
4017 unsigned variants_to_cull;
4018
4019 if (LP_DEBUG & DEBUG_FS) {
4020 debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
4021 lp->nr_fs_variants,
4022 lp->nr_fs_instrs,
4023 lp->nr_fs_variants ? lp->nr_fs_instrs / lp->nr_fs_variants : 0);
4024 }
4025
4026 /* First, check if we've exceeded the max number of shader variants.
4027 * If so, free 6.25% of them (the least recently used ones).
4028 */
4029 variants_to_cull = lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;
4030
4031 if (variants_to_cull ||
4032 lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
4033 struct pipe_context *pipe = &lp->pipe;
4034
4035 if (gallivm_debug & GALLIVM_DEBUG_PERF) {
4036 debug_printf("Evicting FS: %u fs variants,\t%u total variants,"
4037 "\t%u instrs,\t%u instrs/variant\n",
4038 shader->variants_cached,
4039 lp->nr_fs_variants, lp->nr_fs_instrs,
4040 lp->nr_fs_instrs / lp->nr_fs_variants);
4041 }
4042
4043 /*
4044 * XXX: we need to flush the context until we have some sort of
4045           * reference counting in fragment shaders, as they may still be binned.
4046           * Flushing alone might not be sufficient; we need to wait on it too.
4047 */
4048 llvmpipe_finish(pipe, __FUNCTION__);
4049
4050 /*
4051          * We need to re-check lp->nr_fs_variants because an arbitrarily large
4052 * number of shader variants (potentially all of them) could be
4053 * pending for destruction on flush.
4054 */
4055
4056 for (i = 0; i < variants_to_cull || lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
4057 struct lp_fs_variant_list_item *item;
4058 if (is_empty_list(&lp->fs_variants_list)) {
4059 break;
4060 }
4061 item = last_elem(&lp->fs_variants_list);
4062 assert(item);
4063 assert(item->base);
4064 llvmpipe_remove_shader_variant(lp, item->base);
4065 }
4066 }
4067
4068 /*
4069 * Generate the new variant.
4070 */
4071 t0 = os_time_get();
4072 variant = generate_variant(lp, shader, key);
4073 t1 = os_time_get();
4074 dt = t1 - t0;
4075 LP_COUNT_ADD(llvm_compile_time, dt);
4076 LP_COUNT_ADD(nr_llvm_compiles, 2); /* emit vs. omit in/out test */
4077
4078 /* Put the new variant into the list */
4079 if (variant) {
4080 insert_at_head(&shader->variants, &variant->list_item_local);
4081 insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
4082 lp->nr_fs_variants++;
4083 lp->nr_fs_instrs += variant->nr_instrs;
4084 shader->variants_cached++;
4085 }
4086 }
4087
4088 /* Bind this variant */
4089 lp_setup_set_fs_variant(lp->setup, variant);
4090 }
4091
4092
4093
4094
4095
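/**
 * Plug the fragment-shader related state functions into the context.
 */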
4096 void
4097 llvmpipe_init_fs_funcs(struct llvmpipe_context *llvmpipe)
4098 {
4099 llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
4100 llvmpipe->pipe.bind_fs_state = llvmpipe_bind_fs_state;
4101 llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;
4102
4103 llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
4104
4105 llvmpipe->pipe.set_shader_buffers = llvmpipe_set_shader_buffers;
4106 llvmpipe->pipe.set_shader_images = llvmpipe_set_shader_images;
4107 }
4108
4109