radeonsi: cleanup includes, add missing license
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_flow.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_dump.h"

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include <errno.h>

struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_instance_id;
	LLVMValueRef const_md;
	LLVMValueRef const_resource[NUM_CONST_BUFFERS];
#if HAVE_LLVM >= 0x0304
	LLVMValueRef ddxy_lds;
#endif
	LLVMValueRef *constants[NUM_CONST_BUFFERS];
	LLVMValueRef *resources;
	LLVMValueRef *samplers;
	LLVMValueRef so_buffers[4];
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8
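
/* Descriptive note: the address-space numbers above follow the LLVM AMDGPU
 * backend's conventions of this era (2 = constant memory, 3 = local
 * memory/LDS). */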

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is the
 * type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct si_shader_context * si_shader_ctx,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;

	LLVMValueRef indices[2] = {
		LLVMConstInt(LLVMInt64TypeInContext(base->gallivm->context), 0, false),
		offset
	};
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		base->gallivm->builder, base_ptr, indices, 2, "");

	LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
	LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
	return result;
}

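/* Compute the index used to fetch per-instance vertex data:
 * (instance_id + start_instance) / divisor, matching the instanced
 * array divisor semantics. */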
static LLVMValueRef get_instance_index_for_fetch(
	struct radeon_llvm_context * radeon_bld,
	unsigned divisor)
{
	struct si_shader_context *si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;

	LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
					   si_shader_ctx->param_instance_id);
	result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
			radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");

	if (divisor > 1)
		result = LLVMBuildUDiv(gallivm->builder, result,
				lp_build_const_int32(gallivm, divisor), "");

	return result;
}

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;

	/* Load the T list */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		si_shader_ctx->shader->shader.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&si_shader_ctx->radeon_bld, divisor);
	} else {
		/* Load the buffer index, which is always stored in VGPR0
		 * for Vertex Shaders */
		buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					    si_shader_ctx->param_vertex_id);
	}

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}

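/* Declare a fragment shader input. Position and front-face inputs are read
 * directly from dedicated hardware registers; everything else is fetched
 * with the interpolation intrinsics (llvm.SI.fs.interp for interpolated
 * inputs, llvm.SI.fs.constant for flat ones). */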
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	LLVMValueRef interp_param;
	const char * intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *                      quad begins a new primitive. Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		if (si_shader_ctx->shader->key.ps.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4)? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->shader->key.ps.color_two_side) {
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) {
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			build_intrinsic(base->gallivm->builder, intr_name,
					input_type, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static void declare_system_value(
	struct radeon_llvm_context * radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     si_shader_ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     si_shader_ctx->param_vertex_id);
		break;

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}

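/* Fetch a value from a constant buffer. Directly addressed constants are
 * read from the preloaded si_shader_ctx->constants; indirectly addressed
 * ones go through llvm.SI.load.const, whose second argument is a byte
 * offset (hence the dword index is multiplied by 4 below). */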
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef args[2];
	LLVMValueRef addr;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (!reg->Register.Indirect)
		return bitcast(bld_base, type, si_shader_ctx->constants[buf][idx]);

	args[0] = si_shader_ctx->const_resource[buf];
	args[1] = lp_build_const_int32(base->gallivm, idx * 4);
	addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);

			si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}

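/* Emit the alpha test. A value of 1.0 or -1.0 is passed to
 * llvm.AMDGPU.kill, which discards the pixel when its argument is
 * negative; PIPE_FUNC_NEVER becomes an unconditional kill (kilp). */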
static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
				SI_PARAM_ALPHA_REF);

		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->shader->key.ps.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     alpha_ref);
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}

static void si_alpha_to_one(struct lp_build_tgsi_context *bld_base,
			    unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);

	/* set alpha to one */
	LLVMBuildStore(bld_base->base.gallivm->builder,
		       bld_base->base.one,
		       si_shader_ctx->radeon_bld.soa.outputs[index][3]);
}

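/* Compute clip distances from a CLIPVERTEX output: each enabled clip
 * distance is the dot product of the output position with a user clip
 * plane read from the driver-internal constant buffer, and is exported
 * through the POS+2/POS+3 targets. */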
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
				    LLVMValueRef (*pos)[9], unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct si_pipe_shader *shader = si_shader_ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef out_elts[4];
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, NUM_PIPE_CONST_BUFFERS);
	LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, constbuf_index);

	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
		out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
	}

	for (reg_index = 0; reg_index < 2; reg_index++) {
		LLVMValueRef *args = pos[2 + reg_index];

		if (!(shader->key.vs.ucps_enabled & (1 << reg_index)))
			continue;

		shader->shader.clip_dist_write |= 0xf << (4 * reg_index);

		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				args[0] = const_resource;
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = build_intrinsic(base->gallivm->builder,
							   "llvm.SI.load.const",
							   base->elem_type,
							   args, 2,
							   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}

static void si_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	if (so->num_outputs)
		fprintf(stderr, "STREAMOUT\n");

	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, "  %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
			i, so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "");
	}
}

/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4). */
static void build_tbuffer_store(struct si_shader_context *shader,
				LLVMValueRef rsrc,
				LLVMValueRef vdata,
				unsigned num_channels,
				LLVMValueRef vaddr,
				LLVMValueRef soffset,
				unsigned inst_offset,
				unsigned dfmt,
				unsigned nfmt,
				unsigned offen,
				unsigned idxen,
				unsigned glc,
				unsigned slc,
				unsigned tfe)
{
	struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(i32, num_channels, 0),
		vaddr,
		soffset,
		LLVMConstInt(i32, inst_offset, 0),
		LLVMConstInt(i32, dfmt, 0),
		LLVMConstInt(i32, nfmt, 0),
		LLVMConstInt(i32, offen, 0),
		LLVMConstInt(i32, idxen, 0),
		LLVMConstInt(i32, glc, 0),
		LLVMConstInt(i32, slc, 0),
		LLVMConstInt(i32, tfe, 0)
	};

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	lp_build_intrinsic(gallivm->builder, name,
			   LLVMVoidTypeInContext(gallivm->context),
			   args, Elements(args));
}

static void build_streamout_store(struct si_shader_context *shader,
				  LLVMValueRef rsrc,
				  LLVMValueRef vdata,
				  unsigned num_channels,
				  LLVMValueRef vaddr,
				  LLVMValueRef soffset,
				  unsigned inst_offset)
{
	static unsigned dfmt[] = {
		V_008F0C_BUF_DATA_FORMAT_32,
		V_008F0C_BUF_DATA_FORMAT_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32_32
	};
	assert(num_channels >= 1 && num_channels <= 4);

	build_tbuffer_store(shader, rsrc, vdata, num_channels, vaddr, soffset,
			    inst_offset, dfmt[num_channels-1],
			    V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
}

/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers. */
static void si_llvm_emit_streamout(struct si_shader_context *shader)
{
	struct pipe_stream_output_info *so = &shader->shader->selector->so;
	struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);

	LLVMValueRef so_param =
		LLVMGetParam(shader->radeon_bld.main_fn,
			     shader->param_streamout_config);

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		LLVMBuildAnd(builder,
			     LLVMBuildLShr(builder, so_param,
					   LLVMConstInt(i32, 16, 0), ""),
			     LLVMConstInt(i32, 127, 0), "");

	LLVMValueRef tid = build_intrinsic(builder, "llvm.SI.tid", i32,
					   NULL, 0, LLVMReadNoneAttribute);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(shader->radeon_bld.main_fn,
				     shader->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef so_offset = LLVMGetParam(shader->radeon_bld.main_fn,
							      shader->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS] = shader->radeon_bld.soa.outputs;

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			LLVMValueRef out[4];

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildLoad(builder, outputs[reg][start+j], "");
				out[j] = LLVMBuildBitCast(builder, out[j], i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(i32, j, 0), "");
				}
				break;
			}

			build_streamout_store(shader, shader->so_buffers[buf_idx],
					      vdata, num_comps,
					      so_write_offset[buf_idx],
					      LLVMConstInt(i32, 0, 0),
					      so->output[i].dst_offset*4);
		}
	}
	lp_build_endif(&if_ctx);
}


static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	LLVMValueRef pos_args[4][9] = { { 0 } };
	unsigned semantic_name;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1, psize_index = -1, edgeflag_index = -1;
	int layer_index = -1;
	int i;

	if (si_shader_ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(si_shader_ctx);
	}

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			assert(i < Elements(shader->input));
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			assert(i < Elements(shader->output));
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		semantic_name = d->Semantic.Name;
handle_semantic:
		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (semantic_name) {
			case TGSI_SEMANTIC_PSIZE:
				shader->vs_out_misc_write = true;
				shader->vs_out_point_size = true;
				psize_index = index;
				continue;
			case TGSI_SEMANTIC_EDGEFLAG:
				shader->vs_out_misc_write = true;
				shader->vs_out_edgeflag = true;
				edgeflag_index = index;
				continue;
			case TGSI_SEMANTIC_LAYER:
				shader->vs_out_misc_write = true;
				shader->vs_out_layer = true;
				layer_index = index;
				continue;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
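			/* Note: the BCOLOR case below jumps into the middle
			 * of the if statement (valid, if unusual, C); for
			 * vertex shaders both COLOR and BCOLOR become
			 * parameter exports. */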
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + shader->output[i].sid;
					if (si_shader_ctx->shader->key.ps.alpha_to_one) {
						si_alpha_to_one(bld_base, index);
					}
					if (shader->output[i].sid == 0 &&
					    si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);
				}
				break;
			case TGSI_SEMANTIC_CLIPDIST:
				if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
				      (1 << d->Semantic.Index)))
					continue;
				shader->clip_dist_write |=
					d->Declaration.UsageMask << (d->Semantic.Index << 2);
				target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				si_llvm_emit_clipvertex(bld_base, pos_args, index);
				continue;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					semantic_name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX &&
			    target >= V_008DFC_SQ_EXP_POS &&
			    target <= (V_008DFC_SQ_EXP_POS + 3)) {
				memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
				       args, sizeof(args));
			} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT &&
				   semantic_name == TGSI_SEMANTIC_COLOR) {
				/* If there is an export instruction waiting to be emitted, do so now. */
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				/* This instruction will be emitted at the end of the shader. */
				memcpy(last_args, args, sizeof(args));

				/* Handle FS_COLOR0_WRITES_ALL_CBUFS. */
				if (shader->fs_write_all && shader->output[i].sid == 0 &&
				    si_shader_ctx->shader->key.ps.nr_cbufs > 1) {
					for (int c = 1; c < si_shader_ctx->shader->key.ps.nr_cbufs; c++) {
						si_llvm_init_export_args(bld_base, d, index,
									 V_008DFC_SQ_EXP_MRT + c, args);
						lp_build_intrinsic(base->gallivm->builder,
								   "llvm.SI.export",
								   LLVMVoidTypeInContext(base->gallivm->context),
								   args, 9);
					}
				}
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}
		}

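		/* CLIPDIST is exported twice: above as a position export and
		 * again below, re-labeled GENERIC, as a parameter export so
		 * the fragment shader can read it. */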
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			/* Only setting the stencil component bit (0x2) here
			 * breaks some stencil piglit tests
			 */
			mask |= 0x3;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		unsigned pos_idx = 0;

		/* We need to add the position output manually if it's missing. */
		if (!pos_args[0][0]) {
			pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
			pos_args[0][1] = uint->zero; /* EXEC mask */
			pos_args[0][2] = uint->zero; /* last export? */
			pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
			pos_args[0][4] = uint->zero; /* COMPR flag */
			pos_args[0][5] = base->zero; /* X */
			pos_args[0][6] = base->zero; /* Y */
			pos_args[0][7] = base->zero; /* Z */
			pos_args[0][8] = base->one;  /* W */
		}

		/* Write the misc vector (point size, edgeflag, layer, viewport). */
		if (shader->vs_out_misc_write) {
			pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
							      shader->vs_out_point_size |
							      (shader->vs_out_edgeflag << 1) |
							      (shader->vs_out_layer << 2));
			pos_args[1][1] = uint->zero; /* EXEC mask */
			pos_args[1][2] = uint->zero; /* last export? */
			pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
			pos_args[1][4] = uint->zero; /* COMPR flag */
			pos_args[1][5] = base->zero; /* X */
			pos_args[1][6] = base->zero; /* Y */
			pos_args[1][7] = base->zero; /* Z */
			pos_args[1][8] = base->zero; /* W */

			if (shader->vs_out_point_size) {
				pos_args[1][5] = LLVMBuildLoad(base->gallivm->builder,
					si_shader_ctx->radeon_bld.soa.outputs[psize_index][0], "");
			}

			if (shader->vs_out_edgeflag) {
				LLVMValueRef output = LLVMBuildLoad(base->gallivm->builder,
					si_shader_ctx->radeon_bld.soa.outputs[edgeflag_index][0], "");

				/* The output is a float, but the hw expects an integer
				 * with the first bit containing the edge flag. */
				output = LLVMBuildFPToUI(base->gallivm->builder, output,
							 bld_base->uint_bld.elem_type, "");

				output = lp_build_min(&bld_base->int_bld, output, bld_base->int_bld.one);

				/* The LLVM intrinsic expects a float. */
				pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder, output,
								  base->elem_type, "");
			}

			if (shader->vs_out_layer) {
				pos_args[1][7] = LLVMBuildLoad(base->gallivm->builder,
					si_shader_ctx->radeon_bld.soa.outputs[layer_index][0], "");
			}
		}

		for (i = 0; i < 4; i++)
			if (pos_args[i][0])
				shader->nr_pos_exports++;

		for (i = 0; i < 4; i++) {
			if (!pos_args[i][0])
				continue;

			/* Specify the target we are exporting */
			pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

			if (pos_idx == shader->nr_pos_exports)
				/* Specify that this is the last export */
				pos_args[i][2] = uint->one;

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   pos_args[i], 9);
		}
	} else {
		if (!last_args[0]) {
			/* Specify which components to enable */
			last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

			/* Set COMPR flag to zero to export data as 32-bit */
			last_args[4] = uint->zero;

			/* dummy bits */
			last_args[5] = uint->zero;
			last_args[6] = uint->zero;
			last_args[7] = uint->zero;
			last_args[8] = uint->zero;

			si_shader_ctx->shader->spi_shader_col_format |=
				V_028714_SPI_SHADER_32_ABGR;
			si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
		}

		/* Specify whether the EXEC mask represents the valid mask */
		last_args[1] = uint->one;

		/* Specify that this is the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 1);

		lp_build_intrinsic(base->gallivm->builder,
				   "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   last_args, 9);
	}
}

static const struct lp_build_tgsi_action txf_action;

static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data);

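/* Gather the arguments for the texture intrinsics: coordinates (projected
 * for TXP), the shadow comparison value, user derivatives for TXD, and the
 * LOD/sample index, followed by the resource and sampler. Buffer textures
 * and FMASK-based MSAA sample remapping are handled specially below. */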
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;
	unsigned chan;
	unsigned sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	unsigned sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (target == TGSI_TEXTURE_BUFFER) {
		LLVMTypeRef i128 = LLVMIntTypeInContext(gallivm->context, 128);
		LLVMTypeRef v2i128 = LLVMVectorType(i128, 2);
		LLVMTypeRef i8 = LLVMInt8TypeInContext(gallivm->context);
		LLVMTypeRef v16i8 = LLVMVectorType(i8, 16);

		/* Truncate v32i8 to v16i8. */
		LLVMValueRef res = si_shader_ctx->resources[sampler_index];
		res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
		res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.zero, "");
		res = LLVMBuildBitCast(gallivm->builder, res, v16i8, "");

		emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
		emit_data->args[0] = res;
		emit_data->args[1] = bld_base->uint_bld.zero;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		assert(ref_pos >= 0);
		address[count++] = coords[ref_pos];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		for (chan = 0; chan < 2; chan++) {
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, chan);
			if (num_coords > 1)
				address[count++] = lp_build_emit_fetch(bld_base, inst, 2, chan);
		}
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;

		memcpy(txf_address, address, sizeof(txf_address));

		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Pad to a power-of-two size. */
		while (txf_count < util_next_power_of_two(txf_count))
			txf_address[txf_count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

		/* Read FMASK using TXF. */
		txf_emit_data.chan = 0;
		txf_emit_data.dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context), 4);
		txf_emit_data.args[0] = lp_build_gather_values(gallivm, txf_address, txf_count);
		txf_emit_data.args[1] = si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index];
		txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm,
			target == TGSI_TEXTURE_2D_MSAA ? TGSI_TEXTURE_2D : TGSI_TEXTURE_2D_ARRAY);
		txf_emit_data.arg_count = 3;

		build_tex_intrinsic(&txf_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(uint_bld->elem_type, 4, 0);
		LLVMValueRef F = LLVMConstInt(uint_bld->elem_type, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid).
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder,
					 si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index],
					 LLVMVectorType(uint_bld->elem_type, 8), "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[sampler_index];

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset * off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}

		emit_data->dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 3;
	} else {
		/* Sampler */
		emit_data->args[2] = si_shader_ctx->samplers[sampler_index];

		emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 4;
	}

	/* Dimensions */
	emit_data->args[emit_data->arg_count - 1] =
		lp_build_const_int32(bld_base->base.gallivm, target);

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	char intr_name[127];

	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = build_intrinsic(
			base->gallivm->builder,
			"llvm.SI.vs.load.input", emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		return;
	}

	sprintf(intr_name, "%sv%ui32", action->intr_name,
		LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));

	emit_data->output[emit_data->chan] = build_intrinsic(
		base->gallivm->builder, intr_name, emit_data->dst_type,
		emit_data->args, emit_data->arg_count,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
}

static void txq_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
		LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
		LLVMTypeRef v8i32 = LLVMVectorType(i32, 8);

		/* Read the size from the buffer descriptor directly. */
		LLVMValueRef size = si_shader_ctx->resources[inst->Src[1].Register.Index];
		size = LLVMBuildBitCast(gallivm->builder, size, v8i32, "");
		size = LLVMBuildExtractElement(gallivm->builder, size,
					       lp_build_const_int32(gallivm, 2), "");
		emit_data->args[0] = size;
		return;
	}

	/* Mip level */
	emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];

	/* Dimensions */
	emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
						  inst->Texture.Texture);

	emit_data->arg_count = 3;

	emit_data->dst_type = LLVMVectorType(
		LLVMInt32TypeInContext(bld_base->base.gallivm->context),
		4);
}

static void build_txq_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data)
{
	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	build_tgsi_intrinsic_nomem(action, bld_base, emit_data);
}

#if HAVE_LLVM >= 0x0304

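/* Emit DDX/DDY using LDS: every lane stores its value at LDS[tid], then
 * reads the value of the top-left lane of its 2x2 quad (tid & ~3) and of
 * the lane one to the right (DDX) or one row below (DDY); the derivative
 * is the difference of the two. */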
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMTypeRef i32;
	unsigned swizzle[4];
	unsigned c;

	i32 = LLVMInt32TypeInContext(gallivm->context);

	indices[0] = bld_base->uint_bld.zero;
	indices[1] = build_intrinsic(gallivm->builder, "llvm.SI.tid", i32,
				     NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, 0xfffffffc), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm,
						       opcode == TGSI_OPCODE_DDX ? 1 : 2),
				  "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, base->elem_type, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, base->elem_type, "");

		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}

#endif /* HAVE_LLVM >= 0x0304 */

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

#if HAVE_LLVM >= 0x0304
static const struct lp_build_tgsi_action txd_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampled."
};
#endif

static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_txq_intrinsic,
	.intr_name = "llvm.SI.resinfo"
};

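/* Build the metadata node attached to constant loads in build_indexed_load.
 * (Descriptive note: metadata kind 1 appears to be LLVM's TBAA kind here,
 * and the node marks those loads as constant so LLVM may reorder or CSE
 * them.) */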
static void create_meta_data(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef args[3];

	args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
	args[1] = 0;
	args[2] = lp_build_const_int32(gallivm, 1);

	si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
}

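/* Declare the LLVM function for the shader. SGPR inputs (descriptor
 * pointers, constants, streamout registers) come first, followed by the
 * VGPR system values (vertex/instance IDs for VS, interpolants and
 * position for PS). */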
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
	unsigned i, last_sgpr, num_params;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	params[SI_PARAM_CONST] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 16), NUM_CONST_BUFFERS), CONST_ADDR_SPACE);
1688 /* We assume at most 16 textures per program at the moment.
1689 * This need probably need to be changed to support bindless textures */
1690 params[SI_PARAM_SAMPLER] = LLVMPointerType(
1691 LLVMArrayType(LLVMVectorType(i8, 16), NUM_SAMPLER_VIEWS), CONST_ADDR_SPACE);
1692 params[SI_PARAM_RESOURCE] = LLVMPointerType(
1693 LLVMArrayType(LLVMVectorType(i8, 32), NUM_SAMPLER_STATES), CONST_ADDR_SPACE);
1694
	switch (si_shader_ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_START_INSTANCE] = i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* The locations of the other parameters are assigned dynamically. */

		/* Streamout SGPRs. */
		if (si_shader_ctx->shader->selector->so.num_outputs) {
			params[si_shader_ctx->param_streamout_config = num_params++] = i32;
			params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
		}
		/* A streamout buffer offset is loaded if the stride is non-zero. */
		for (i = 0; i < 4; i++) {
			if (!si_shader_ctx->shader->selector->so.stride[i])
				continue;

			params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[si_shader_ctx->param_vertex_id = num_params++] = i32;
		params[num_params++] = i32; /* unused */
		params[num_params++] = i32; /* unused */
		params[si_shader_ctx->param_instance_id = num_params++] = i32;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = f32;
		params[SI_PARAM_PRIM_MASK] = i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		num_params = SI_PARAM_POS_FIXED_PT+1;
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));
	radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, num_params);
	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		switch (i) {
		default:
			LLVMAddAttribute(P, LLVMInRegAttribute);
			break;
#if HAVE_LLVM >= 0x0304
		/* We tell LLVM that the descriptor arrays are passed by value
		 * so the sinking pass can move the loads. The inputs are
		 * constant, so this is fine. */
		case SI_PARAM_CONST:
		case SI_PARAM_SAMPLER:
		case SI_PARAM_RESOURCE:
			LLVMAddAttribute(P, LLVMByValAttribute);
			break;
#endif
		}
	}

#if HAVE_LLVM >= 0x0304
	if (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	    bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0)
		/* One dword of LDS scratch per lane of a 64-wide wavefront. */
		si_shader_ctx->ddxy_lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);
#endif
}
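
/* Illustrative summary of the calling convention built above: for a vertex
 * shader with streamout on buffer 0 the SGPR arguments are the const buffer,
 * sampler and resource descriptor arrays, the vertex buffer and streamout
 * buffer arrays, the start instance, the streamout config and write index,
 * and the buffer-0 offset; they are followed by the VGPR arguments vertex id,
 * two unused slots and instance id. Everything up to last_sgpr is marked
 * inreg and the descriptor arrays are additionally passed byval. */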

static void preload_constants(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info *info = bld_base->info;
	unsigned buf;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);

	for (buf = 0; buf < NUM_CONST_BUFFERS; buf++) {
		unsigned i, num_const = info->const_file_max[buf] + 1;

		if (num_const == 0)
			continue;

		/* Allocate space for the constant values. */
		si_shader_ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));

		/* Load the resource descriptor. */
		si_shader_ctx->const_resource[buf] =
			build_indexed_load(si_shader_ctx, ptr, lp_build_const_int32(gallivm, buf));

		/* Load the constants; we rely on code sinking to do the rest. */
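		/* Each constant slot has four 32-bit channels, so channel ch
		 * of slot c is element c * 4 + ch; the i * 4 below converts
		 * that element index into the byte offset passed to
		 * llvm.SI.load.const. */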
		for (i = 0; i < num_const * 4; ++i) {
			LLVMValueRef args[2] = {
				si_shader_ctx->const_resource[buf],
				lp_build_const_int32(gallivm, i * 4)
			};
			si_shader_ctx->constants[buf][i] =
				build_intrinsic(gallivm->builder, "llvm.SI.load.const",
						bld_base->base.elem_type, args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}

static void preload_samplers(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info *info = bld_base->info;

	unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;

	LLVMValueRef res_ptr, samp_ptr;
	LLVMValueRef offset;

	if (num_samplers == 0)
		return;

	/* Allocate space for the values. */
	si_shader_ctx->resources = CALLOC(NUM_SAMPLER_VIEWS, sizeof(LLVMValueRef));
	si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));

	res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
	samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);

	/* Load the resources and samplers; we rely on code sinking to do the rest. */
	for (i = 0; i < num_samplers; ++i) {
		/* Resource */
		offset = lp_build_const_int32(gallivm, i);
		si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);

		/* Sampler */
		offset = lp_build_const_int32(gallivm, i);
		si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);

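		/* Multisampled textures are read through their FMASK, which
		 * records which sample slot holds each fragment's color, so
		 * the extra descriptor lives at FMASK_TEX_OFFSET + i in the
		 * same resource array. */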
		/* FMASK resource */
		if (info->is_msaa_sampler[i]) {
			offset = lp_build_const_int32(gallivm, FMASK_TEX_OFFSET + i);
			si_shader_ctx->resources[FMASK_TEX_OFFSET + i] =
				build_indexed_load(si_shader_ctx, res_ptr, offset);
		}
	}
}

static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	unsigned i;

	if (!si_shader_ctx->shader->selector->so.num_outputs)
		return;

	LLVMValueRef buf_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					    SI_PARAM_SO_BUFFER);

	/* Load the resources; we rely on code sinking to do the rest. */
	for (i = 0; i < 4; ++i) {
		if (si_shader_ctx->shader->selector->so.stride[i]) {
			LLVMValueRef offset = lp_build_const_int32(gallivm, i);

			si_shader_ctx->so_buffers[i] = build_indexed_load(si_shader_ctx, buf_ptr, offset);
		}
	}
}

int si_compile_llvm(struct si_context *sctx, struct si_pipe_shader *shader,
		    LLVMModuleRef mod)
{
	unsigned i;
	uint32_t *ptr;
	struct radeon_llvm_binary binary;
	bool dump = r600_can_dump_shader(&sctx->screen->b,
					 shader->selector ? shader->selector->tokens : NULL);

	memset(&binary, 0, sizeof(binary));
	radeon_llvm_compile(mod, &binary,
			    r600_get_llvm_processor_name(sctx->screen->b.family), dump);
	if (dump && !binary.disassembled) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < binary.code_size; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
				binary.code[i + 2], binary.code[i + 1],
				binary.code[i]);
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */
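	/* binary.config holds (register, value) pairs emitted by the LLVM
	 * backend, eight bytes per entry. Assuming the usual GCN RSRC1
	 * encoding (VGPR count in bits [5:0] in units of 4, SGPR count in
	 * bits [9:6] in units of 8), a RSRC1 value of 0x0c5 would decode
	 * below to (3 + 1) * 8 = 32 SGPRs and (5 + 1) * 4 = 24 VGPRs. */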
	for (i = 0; i < binary.config_size; i += 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
			shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			shader->lds_size = G_00B02C_EXTRA_LDS_SIZE(value);
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			shader->lds_size = G_00B84C_LDS_SIZE(value);
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			shader->spi_ps_input_ena = value;
			break;
		default:
			fprintf(stderr, "Warning: Compiler emitted unknown "
				"config register: 0x%x\n", reg);
			break;
		}
	}

	/* Upload the shader code to an immutable buffer. */
	r600_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(sctx->b.b.screen, PIPE_USAGE_IMMUTABLE,
					       binary.code_size);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)sctx->b.ws->buffer_map(shader->bo->cs_buf, sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
	if (0 /*SI_BIG_ENDIAN*/) {
		/* Byte-swap the dwords on big-endian hosts (currently disabled). */
		for (i = 0; i < binary.code_size / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
		}
	} else {
		memcpy(ptr, binary.code, binary.code_size);
	}
	sctx->b.ws->buffer_unmap(shader->bo->cs_buf);

	free(binary.code);
	free(binary.config);

	return 0;
}

int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context *bld_base;
	LLVMModuleRef mod;
	int r = 0;
	bool dump = r600_can_dump_shader(&sctx->screen->b, shader->selector->tokens);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);

	shader->shader.uses_kill = shader_info.uses_kill;
	shader->shader.uses_instanceid = shader_info.uses_instanceid;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
#if HAVE_LLVM >= 0x0304
	bld_base->op_actions[TGSI_OPCODE_TXD] = txd_action;
#endif
	bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;

#if HAVE_LLVM >= 0x0304
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
#endif

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;

	create_meta_data(&si_shader_ctx);
	create_function(&si_shader_ctx);
	preload_constants(&si_shader_ctx);
	preload_samplers(&si_shader_ctx);
	preload_streamout_buffers(&si_shader_ctx);

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		for (int i = 0; i < NUM_CONST_BUFFERS; i++)
			FREE(si_shader_ctx.constants[i]);
		FREE(si_shader_ctx.resources);
		FREE(si_shader_ctx.samplers);
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	r = si_compile_llvm(sctx, shader, mod);

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	for (int i = 0; i < NUM_CONST_BUFFERS; i++)
		FREE(si_shader_ctx.constants[i]);
	FREE(si_shader_ctx.resources);
	FREE(si_shader_ctx.samplers);

	return r;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	r600_resource_reference(&shader->bo, NULL);
}