radeonsi: Initial geometry shader support
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "radeon_llvm.h"
36 #include "radeon_llvm_emit.h"
37 #include "util/u_memory.h"
38 #include "tgsi/tgsi_parse.h"
39 #include "tgsi/tgsi_util.h"
40 #include "tgsi/tgsi_dump.h"
41
42 #include "si_pipe.h"
43 #include "si_shader.h"
44 #include "sid.h"
45
46 #include <errno.h>
47
48 struct si_shader_output_values /* one shader output, in LLVM value form, plus its TGSI semantic */
49 {
50 LLVMValueRef values[4]; /* the four channel values (x, y, z, w) */
51 unsigned name; /* TGSI_SEMANTIC_* name used to select the export target */
52 unsigned index; /* semantic index (e.g. which COLOR/GENERIC) */
53 unsigned usage; /* declaration usage mask -- which channels are written */
54 };
55
56 struct si_shader_context /* per-compilation state wrapped around the common radeon LLVM context */
57 {
58 struct radeon_llvm_context radeon_bld; /* embedded base context; si_shader_context() casts a bld_base pointer back to this struct, which relies on this member being first */
59 struct tgsi_parse_context parse;
60 struct tgsi_token * tokens;
61 struct si_pipe_shader *shader; /* the shader variant being compiled */
62 struct si_shader *gs_for_vs; /* geometry shader associated with a VS compile -- TODO confirm exact use (not referenced in this chunk) */
63 unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
64 unsigned gs_next_vertex;
65 int param_streamout_config; /* the param_* fields below are LLVM function-parameter indices for LLVMGetParam() */
66 int param_streamout_write_index;
67 int param_streamout_offset[4]; /* one streamout offset parameter per buffer */
68 int param_vertex_id;
69 int param_instance_id;
70 LLVMValueRef const_md; /* metadata node attached to constant loads (see build_indexed_load) */
71 LLVMValueRef const_resource[NUM_CONST_BUFFERS]; /* constant-buffer resource descriptors for llvm.SI.load.const */
72 #if HAVE_LLVM >= 0x0304
73 LLVMValueRef ddxy_lds; /* presumably LDS storage for DDX/DDY lowering -- not used in this chunk */
74 #endif
75 LLVMValueRef *constants[NUM_CONST_BUFFERS]; /* preloaded constant values, indexed [buffer][reg * 4 + chan] (see fetch_constant) */
76 LLVMValueRef *resources;
77 LLVMValueRef *samplers;
78 LLVMValueRef so_buffers[4]; /* streamout buffer descriptors (see si_llvm_emit_streamout) */
79 };
80
/* Recover the enclosing si_shader_context from a TGSI build context.
 * The cast is valid because the TGSI build context is embedded inside
 * the radeon_llvm_context that starts this struct. */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = (struct si_shader_context *)bld_base;
	return ctx;
}
86
87
88 #define PERSPECTIVE_BASE 0
89 #define LINEAR_BASE 9
90
91 #define SAMPLE_OFFSET 0
92 #define CENTER_OFFSET 2
93 #define CENTROID_OFSET 4
94
95 #define USE_SGPR_MAX_SUFFIX_LEN 5
96 #define CONST_ADDR_SPACE 2
97 #define LOCAL_ADDR_SPACE 3
98 #define USER_SGPR_ADDR_SPACE 8
99
100
101 #define SENDMSG_GS 2
102 #define SENDMSG_GS_DONE 3
103
104 #define SENDMSG_GS_OP_NOP (0 << 4)
105 #define SENDMSG_GS_OP_CUT (1 << 4)
106 #define SENDMSG_GS_OP_EMIT (2 << 4)
107 #define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
108
109
110 /**
111 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
112 *
113 * @param offset The offset parameter specifies the number of
114 * elements to offset, not the number of bytes or dwords. An element is the
115 * the type pointed to by the base_ptr parameter (e.g. int is the element of
116 * an int* pointer)
117 *
118 * When LLVM lowers the load instruction, it will convert the element offset
119 * into a dword offset automatically.
120 *
121 */
122 static LLVMValueRef build_indexed_load(
123 struct si_shader_context * si_shader_ctx,
124 LLVMValueRef base_ptr,
125 LLVMValueRef offset)
126 {
127 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
128
129 LLVMValueRef indices[2] = {
130 LLVMConstInt(LLVMInt64TypeInContext(base->gallivm->context), 0, false),
131 offset
132 };
133 LLVMValueRef computed_ptr = LLVMBuildGEP(
134 base->gallivm->builder, base_ptr, indices, 2, "");
135
136 LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
137 LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
138 return result;
139 }
140
141 static LLVMValueRef get_instance_index_for_fetch(
142 struct radeon_llvm_context * radeon_bld,
143 unsigned divisor)
144 {
145 struct si_shader_context *si_shader_ctx =
146 si_shader_context(&radeon_bld->soa.bld_base);
147 struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
148
149 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
150 si_shader_ctx->param_instance_id);
151 result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
152 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
153
154 if (divisor > 1)
155 result = LLVMBuildUDiv(gallivm->builder, result,
156 lp_build_const_int32(gallivm, divisor), "");
157
158 return result;
159 }
160
/* Declare one vertex-shader input: fetch the attribute with
 * llvm.SI.vs.load.input and break the resulting vec4 into the four
 * per-channel SoA input slots.  The buffer index is the vertex id for
 * per-vertex data, or is derived from the instance id when the input has
 * a non-zero instance divisor. */
161 static void declare_input_vs(
162 struct radeon_llvm_context *radeon_bld,
163 unsigned input_index,
164 const struct tgsi_full_declaration *decl)
165 {
166 struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
167 struct gallivm_state *gallivm = base->gallivm;
168 struct si_shader_context *si_shader_ctx =
169 si_shader_context(&radeon_bld->soa.bld_base);
170 unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];
171 
172 unsigned chan;
173 
174 LLVMValueRef t_list_ptr;
175 LLVMValueRef t_offset;
176 LLVMValueRef t_list;
177 LLVMValueRef attribute_offset;
178 LLVMValueRef buffer_index;
179 LLVMValueRef args[3];
180 LLVMTypeRef vec4_type;
181 LLVMValueRef input;
182 
183 /* Load the T list */
/* Fetch this attribute's vertex-buffer resource descriptor. */
184 t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);
185 
186 t_offset = lp_build_const_int32(gallivm, input_index);
187 
188 t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);
189 
190 /* Build the attribute offset */
191 attribute_offset = lp_build_const_int32(gallivm, 0);
192 
193 if (divisor) {
194 /* Build index from instance ID, start instance and divisor */
195 si_shader_ctx->shader->shader.uses_instanceid = true;
196 buffer_index = get_instance_index_for_fetch(&si_shader_ctx->radeon_bld, divisor);
197 } else {
198 /* Load the buffer index, which is always stored in VGPR0
199 * for Vertex Shaders */
200 buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
201 si_shader_ctx->param_vertex_id);
202 }
203 
/* args: resource descriptor, attribute byte offset, buffer index. */
204 vec4_type = LLVMVectorType(base->elem_type, 4);
205 args[0] = t_list;
206 args[1] = attribute_offset;
207 args[2] = buffer_index;
208 input = build_intrinsic(gallivm->builder,
209 "llvm.SI.vs.load.input", vec4_type, args, 3,
210 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
211 
212 /* Break up the vec4 into individual components */
213 for (chan = 0; chan < 4; chan++) {
214 LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
215 /* XXX: Use a helper function for this. There is one in
216 * tgsi_llvm.c. */
217 si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
218 LLVMBuildExtractElement(gallivm->builder,
219 input, llvm_chan, "");
220 }
221 }
222
/* Geometry-shader inputs need no up-front declaration work: each input
 * is read directly from the ESGS ring by fetch_input_gs() at use time. */
static void declare_input_gs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	(void)radeon_bld;
	(void)input_index;
	(void)decl;
}
230
/* Fetch one channel of a GS input from the ESGS ring buffer.
 *
 * Returns NULL for non-dimensioned registers (GS inputs are always
 * 2-dimensional: vertex index x register).  When called with
 * swizzle == ~0, recurses per channel and gathers the results into a
 * vector. */
231 static LLVMValueRef fetch_input_gs(
232 struct lp_build_tgsi_context *bld_base,
233 const struct tgsi_full_src_register *reg,
234 enum tgsi_opcode_type type,
235 unsigned swizzle)
236 {
237 struct lp_build_context *base = &bld_base->base;
238 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
239 struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
240 struct gallivm_state *gallivm = base->gallivm;
241 LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
242 LLVMValueRef vtx_offset;
243 LLVMValueRef t_list_ptr;
244 LLVMValueRef t_list;
245 LLVMValueRef args[9];
246 unsigned vtx_offset_param;
247 
248 if (!reg->Register.Dimension)
249 return NULL;
250 
/* Whole-register fetch: gather all four channels. */
251 if (swizzle == ~0) {
252 LLVMValueRef values[TGSI_NUM_CHANNELS];
253 unsigned chan;
254 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
255 values[chan] = fetch_input_gs(bld_base, reg, type, chan);
256 }
257 return lp_build_gather_values(bld_base->base.gallivm, values,
258 TGSI_NUM_CHANNELS);
259 }
260 
261 /* Get the vertex offset parameter */
/* Vertices 0-1 use SI_PARAM_VTX0_OFFSET.., vertices 2-5 the second
 * parameter group; at most 6 vertices are supported. */
262 vtx_offset_param = reg->Dimension.Index;
263 if (vtx_offset_param < 2) {
264 vtx_offset_param += SI_PARAM_VTX0_OFFSET;
265 } else {
266 assert(vtx_offset_param < 6);
267 vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
268 }
269 vtx_offset = lp_build_mul_imm(uint,
270 LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
271 vtx_offset_param),
272 4);
273 
274 /* Load the ESGS ring resource descriptor */
275 t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
276 t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
277 lp_build_const_int32(gallivm,
278 NUM_PIPE_CONST_BUFFERS + 1));
279 
/* Buffer-load arguments: rsrc, voffset, soffset (per-channel ring
 * offset), then the addressing/cache flags below. */
280 args[0] = t_list;
281 args[1] = vtx_offset;
282 args[2] = lp_build_const_int32(gallivm,
283 ((reg->Register.Index * 4) + swizzle) * 256);
284 args[3] = uint->zero;
285 args[4] = uint->one; /* OFFEN */
286 args[5] = uint->zero; /* IDXEN */
287 args[6] = uint->one; /* GLC */
288 args[7] = uint->zero; /* SLC */
289 args[8] = uint->zero; /* TFE */
290 
291 return LLVMBuildBitCast(gallivm->builder,
292 build_intrinsic(gallivm->builder,
293 "llvm.SI.buffer.load.dword.i32.i32",
294 i32, args, 9,
295 LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
296 tgsi2llvmtype(bld_base, type), "");
297 }
298
/* Declare one fragment-shader input.
 *
 * POSITION and FACE are filled from dedicated hardware parameters.
 * Everything else is interpolated via llvm.SI.fs.interp (or read flat via
 * llvm.SI.fs.constant when there is no interp_param), with special
 * handling for two-sided colors (front/back select) and FOG (only .x is
 * interpolated; .yzw are forced to 0,0,1). */
299 static void declare_input_fs(
300 struct radeon_llvm_context *radeon_bld,
301 unsigned input_index,
302 const struct tgsi_full_declaration *decl)
303 {
304 struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
305 struct si_shader_context *si_shader_ctx =
306 si_shader_context(&radeon_bld->soa.bld_base);
307 struct si_shader *shader = &si_shader_ctx->shader->shader;
308 struct lp_build_context *uint = &radeon_bld->soa.bld_base.uint_bld;
309 struct gallivm_state *gallivm = base->gallivm;
310 LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
311 LLVMValueRef main_fn = radeon_bld->main_fn;
312 
313 LLVMValueRef interp_param;
314 const char * intr_name;
315 
316 /* This value is:
317 * [15:0] NewPrimMask (Bit mask for each quad. It is set it the
318 * quad begins a new primitive. Bit 0 always needs
319 * to be unset)
320 * [32:16] ParamOffset
321 *
322 */
323 LLVMValueRef params = LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK);
324 LLVMValueRef attr_number;
325 
326 unsigned chan;
327 
/* gl_FragCoord: read x/y/z/w from the position parameters; w is
 * delivered as 1/w's reciprocal source, so invert it. */
328 if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
329 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
330 unsigned soa_index =
331 radeon_llvm_reg_index_soa(input_index, chan);
332 radeon_bld->inputs[soa_index] =
333 LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);
334 
335 if (chan == 3)
336 /* RCP for fragcoord.w */
337 radeon_bld->inputs[soa_index] =
338 LLVMBuildFDiv(gallivm->builder,
339 lp_build_const_float(gallivm, 1.0f),
340 radeon_bld->inputs[soa_index],
341 "");
342 }
343 return;
344 }
345 
/* gl_FrontFacing: map the hardware front-face value (> 0 means front)
 * to (1,0,0,1). */
346 if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
347 LLVMValueRef face, is_face_positive;
348 
349 face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);
350 
351 is_face_positive = LLVMBuildFCmp(gallivm->builder,
352 LLVMRealUGT, face,
353 lp_build_const_float(gallivm, 0.0f),
354 "");
355 
356 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
357 LLVMBuildSelect(gallivm->builder,
358 is_face_positive,
359 lp_build_const_float(gallivm, 1.0f),
360 lp_build_const_float(gallivm, 0.0f),
361 "");
362 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
363 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
364 lp_build_const_float(gallivm, 0.0f);
365 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
366 lp_build_const_float(gallivm, 1.0f);
367 
368 return;
369 }
370 
/* Assign this input the next interpolated-parameter slot. */
371 shader->input[input_index].param_offset = shader->ninterp++;
372 attr_number = lp_build_const_int32(gallivm,
373 shader->input[input_index].param_offset);
374 
/* Pick the barycentric coordinates matching the interpolation mode;
 * interp_param == 0 selects flat (constant) fetching. */
375 switch (decl->Interp.Interpolate) {
376 case TGSI_INTERPOLATE_COLOR:
377 if (si_shader_ctx->shader->key.ps.flatshade) {
378 interp_param = 0;
379 } else {
380 if (decl->Interp.Centroid)
381 interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
382 else
383 interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
384 }
385 break;
386 case TGSI_INTERPOLATE_CONSTANT:
387 interp_param = 0;
388 break;
389 case TGSI_INTERPOLATE_LINEAR:
390 if (decl->Interp.Centroid)
391 interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
392 else
393 interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
394 break;
395 case TGSI_INTERPOLATE_PERSPECTIVE:
396 if (decl->Interp.Centroid)
397 interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
398 else
399 interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
400 break;
401 default:
402 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
403 return;
404 }
405 
406 intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
407 
408 /* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
/* Two-sided colors: interpolate both the front attribute and the back
 * attribute (stored at param_offset + 1), then select per fragment. */
409 if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
410 si_shader_ctx->shader->key.ps.color_two_side) {
411 LLVMValueRef args[4];
412 LLVMValueRef face, is_face_positive;
413 LLVMValueRef back_attr_number =
414 lp_build_const_int32(gallivm,
415 shader->input[input_index].param_offset + 1);
416 
417 face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);
418 
419 is_face_positive = LLVMBuildFCmp(gallivm->builder,
420 LLVMRealUGT, face,
421 lp_build_const_float(gallivm, 0.0f),
422 "");
423 
424 args[2] = params;
425 args[3] = interp_param;
426 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
427 LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
428 unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
429 LLVMValueRef front, back;
430 
431 args[0] = llvm_chan;
432 args[1] = attr_number;
433 front = build_intrinsic(gallivm->builder, intr_name,
434 input_type, args, args[3] ? 4 : 3,
435 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
436 
437 args[1] = back_attr_number;
438 back = build_intrinsic(gallivm->builder, intr_name,
439 input_type, args, args[3] ? 4 : 3,
440 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
441 
442 radeon_bld->inputs[soa_index] =
443 LLVMBuildSelect(gallivm->builder,
444 is_face_positive,
445 front,
446 back,
447 "");
448 }
449 
/* The back color consumed a second parameter slot. */
450 shader->ninterp++;
451 } else if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) {
452 LLVMValueRef args[4];
453 
454 args[0] = uint->zero;
455 args[1] = attr_number;
456 args[2] = params;
457 args[3] = interp_param;
458 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
459 build_intrinsic(gallivm->builder, intr_name,
460 input_type, args, args[3] ? 4 : 3,
461 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
462 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
463 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
464 lp_build_const_float(gallivm, 0.0f);
465 radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
466 lp_build_const_float(gallivm, 1.0f);
467 } else {
/* Generic input: interpolate all four channels. */
468 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
469 LLVMValueRef args[4];
470 LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
471 unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
472 args[0] = llvm_chan;
473 args[1] = attr_number;
474 args[2] = params;
475 args[3] = interp_param;
476 radeon_bld->inputs[soa_index] =
477 build_intrinsic(gallivm->builder, intr_name,
478 input_type, args, args[3] ? 4 : 3,
479 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
480 }
481 }
482 }
483
484 static void declare_system_value(
485 struct radeon_llvm_context * radeon_bld,
486 unsigned index,
487 const struct tgsi_full_declaration *decl)
488 {
489 struct si_shader_context *si_shader_ctx =
490 si_shader_context(&radeon_bld->soa.bld_base);
491 LLVMValueRef value = 0;
492
493 switch (decl->Semantic.Name) {
494 case TGSI_SEMANTIC_INSTANCEID:
495 value = LLVMGetParam(radeon_bld->main_fn,
496 si_shader_ctx->param_instance_id);
497 break;
498
499 case TGSI_SEMANTIC_VERTEXID:
500 value = LLVMGetParam(radeon_bld->main_fn,
501 si_shader_ctx->param_vertex_id);
502 break;
503
504 default:
505 assert(!"unknown system value");
506 return;
507 }
508
509 radeon_bld->system_values[index] = value;
510 }
511
/* Fetch one channel of a constant-buffer value.
 *
 * Directly-addressed constants come from the preloaded constants[][]
 * array; indirectly-addressed ones are loaded at runtime with
 * llvm.SI.load.const using the address register.  LP_CHAN_ALL gathers
 * all four channels into a vector. */
512 static LLVMValueRef fetch_constant(
513 struct lp_build_tgsi_context * bld_base,
514 const struct tgsi_full_src_register *reg,
515 enum tgsi_opcode_type type,
516 unsigned swizzle)
517 {
518 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
519 struct lp_build_context * base = &bld_base->base;
520 const struct tgsi_ind_register *ireg = &reg->Indirect;
521 unsigned buf, idx;
522 
523 LLVMValueRef args[2];
524 LLVMValueRef addr;
525 LLVMValueRef result;
526 
527 if (swizzle == LP_CHAN_ALL) {
528 unsigned chan;
529 LLVMValueRef values[4];
530 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
531 values[chan] = fetch_constant(bld_base, reg, type, chan);
532 
533 return lp_build_gather_values(bld_base->base.gallivm, values, 4);
534 }
535 
/* Non-dimensioned registers address constant buffer 0. */
536 buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
537 idx = reg->Register.Index * 4 + swizzle;
538 
539 if (!reg->Register.Indirect)
540 return bitcast(bld_base, type, si_shader_ctx->constants[buf][idx]);
541 
/* Indirect: byte offset = addr-reg * 16 (one vec4 per index) plus the
 * direct channel offset idx * 4. */
542 args[0] = si_shader_ctx->const_resource[buf];
543 args[1] = lp_build_const_int32(base->gallivm, idx * 4);
544 addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
545 addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
546 addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
547 args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);
548 
549 result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
550 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
551 
552 return bitcast(bld_base, type, result);
553 }
554
555 /* Initialize arguments for the shader export intrinsic */
/* Fills args[0..8]: write mask, valid-mask flag, done flag, target,
 * COMPR flag, then the four (or two packed) channel values.  For 16bpc
 * MRT exports the four floats are packed into two f16 pairs and the
 * shader's col_format/cb_shader_mask state is updated. */
556 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
557 LLVMValueRef *values,
558 unsigned target,
559 LLVMValueRef *args)
560 {
561 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
562 struct lp_build_context *uint =
563 &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
564 struct lp_build_context *base = &bld_base->base;
565 unsigned compressed = 0;
566 unsigned chan;
567 
/* Only fragment-shader MRT exports can be compressed to fp16. */
568 if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
569 int cbuf = target - V_008DFC_SQ_EXP_MRT;
570 
571 if (cbuf >= 0 && cbuf < 8) {
572 compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;
573 
574 if (compressed)
575 si_shader_ctx->shader->spi_shader_col_format |=
576 V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
577 else
578 si_shader_ctx->shader->spi_shader_col_format |=
579 V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
580 
581 si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
582 }
583 }
584 
585 if (compressed) {
586 /* Pixel shader needs to pack output values before export */
587 for (chan = 0; chan < 2; chan++ ) {
588 args[0] = values[2 * chan];
589 args[1] = values[2 * chan + 1];
590 args[chan + 5] =
591 build_intrinsic(base->gallivm->builder,
592 "llvm.SI.packf16",
593 LLVMInt32TypeInContext(base->gallivm->context),
594 args, 2,
595 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
/* The packed i32 is reinterpreted as float and duplicated into
 * both value slots the export expects. */
596 args[chan + 7] = args[chan + 5] =
597 LLVMBuildBitCast(base->gallivm->builder,
598 args[chan + 5],
599 LLVMFloatTypeInContext(base->gallivm->context),
600 "");
601 }
602 
603 /* Set COMPR flag */
604 args[4] = uint->one;
605 } else {
606 for (chan = 0; chan < 4; chan++ )
607 /* +5 because the first output value will be
608 * the 6th argument to the intrinsic. */
609 args[chan + 5] = values[chan];
610 
611 /* Clear COMPR flag */
612 args[4] = uint->zero;
613 }
614 
615 /* XXX: This controls which components of the output
616 * registers actually get exported. (e.g bit 0 means export
617 * X component, bit 1 means export Y component, etc.) I'm
618 * hard coding this to 0xf for now. In the future, we might
619 * want to do something else. */
620 args[0] = lp_build_const_int32(base->gallivm, 0xf);
621 
622 /* Specify whether the EXEC mask represents the valid mask */
623 args[1] = uint->zero;
624 
625 /* Specify whether this is the last export */
626 args[2] = uint->zero;
627 
628 /* Specify the target we are exporting */
629 args[3] = lp_build_const_int32(base->gallivm, target);
630 
631 /* XXX: We probably need to keep track of the output
632 * values, so we know what we are passing to the next
633 * stage. */
634 }
635
636 /* Load from output pointers and initialize arguments for the shader export intrinsic */
637 static void si_llvm_init_export_args_load(struct lp_build_tgsi_context *bld_base,
638 LLVMValueRef *out_ptr,
639 unsigned target,
640 LLVMValueRef *args)
641 {
642 struct gallivm_state *gallivm = bld_base->base.gallivm;
643 LLVMValueRef values[4];
644 int i;
645
646 for (i = 0; i < 4; i++)
647 values[i] = LLVMBuildLoad(gallivm->builder, out_ptr[i], "");
648
649 si_llvm_init_export_args(bld_base, values, target, args);
650 }
651
652 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
653 LLVMValueRef *out_ptr)
654 {
655 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
656 struct gallivm_state *gallivm = bld_base->base.gallivm;
657
658 if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
659 LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
660 SI_PARAM_ALPHA_REF);
661
662 LLVMValueRef alpha_pass =
663 lp_build_cmp(&bld_base->base,
664 si_shader_ctx->shader->key.ps.alpha_func,
665 LLVMBuildLoad(gallivm->builder, out_ptr[3], ""),
666 alpha_ref);
667 LLVMValueRef arg =
668 lp_build_select(&bld_base->base,
669 alpha_pass,
670 lp_build_const_float(gallivm, 1.0f),
671 lp_build_const_float(gallivm, -1.0f));
672
673 build_intrinsic(gallivm->builder,
674 "llvm.AMDGPU.kill",
675 LLVMVoidTypeInContext(gallivm->context),
676 &arg, 1, 0);
677 } else {
678 build_intrinsic(gallivm->builder,
679 "llvm.AMDGPU.kilp",
680 LLVMVoidTypeInContext(gallivm->context),
681 NULL, 0, 0);
682 }
683 }
684
/* Emit user clip-distance exports computed from the clip vertex.
 *
 * For each enabled group of four clip planes (ucps_enabled bit), dot the
 * clip-vertex components (out_elts) with the user clip-plane vectors
 * stored in the driver constant buffer, and fill pos[2 + reg_index] with
 * the export arguments for POS target 2/3. */
685 static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
686 LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
687 {
688 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
689 struct si_pipe_shader *shader = si_shader_ctx->shader;
690 struct lp_build_context *base = &bld_base->base;
691 struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
692 unsigned reg_index;
693 unsigned chan;
694 unsigned const_chan;
695 LLVMValueRef base_elt;
696 LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
/* Clip planes live in the internal constant buffer just past the
 * NUM_PIPE_CONST_BUFFERS user buffers. */
697 LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, NUM_PIPE_CONST_BUFFERS);
698 LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, constbuf_index);
699 
700 for (reg_index = 0; reg_index < 2; reg_index ++) {
701 LLVMValueRef *args = pos[2 + reg_index];
702 
703 if (!(shader->key.vs.ucps_enabled & (1 << reg_index)))
704 continue;
705 
706 shader->shader.clip_dist_write |= 0xf << (4 * reg_index);
707 
/* Start all four accumulated distances at 0. */
708 args[5] =
709 args[6] =
710 args[7] =
711 args[8] = lp_build_const_float(base->gallivm, 0.0f);
712 
713 /* Compute dot products of position and user clip plane vectors */
714 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
715 for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
716 args[0] = const_resource;
/* Byte offset of clip plane [reg_index*4 + chan], component
 * const_chan (4 bytes per float). */
717 args[1] = lp_build_const_int32(base->gallivm,
718 ((reg_index * 4 + chan) * 4 +
719 const_chan) * 4);
720 base_elt = build_intrinsic(base->gallivm->builder,
721 "llvm.SI.load.const",
722 base->elem_type,
723 args, 2,
724 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
725 args[5 + chan] =
726 lp_build_add(base, args[5 + chan],
727 lp_build_mul(base, base_elt,
728 out_elts[const_chan]));
729 }
730 }
731 
/* Export arguments: write mask, valid-mask flag, done flag, target. */
732 args[0] = lp_build_const_int32(base->gallivm, 0xf);
733 args[1] = uint->zero;
734 args[2] = uint->zero;
735 args[3] = lp_build_const_int32(base->gallivm,
736 V_008DFC_SQ_EXP_POS + 2 + reg_index);
737 args[4] = uint->zero;
738 }
739 }
740
741 static void si_dump_streamout(struct pipe_stream_output_info *so)
742 {
743 unsigned i;
744
745 if (so->num_outputs)
746 fprintf(stderr, "STREAMOUT\n");
747
748 for (i = 0; i < so->num_outputs; i++) {
749 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
750 so->output[i].start_component;
751 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
752 i, so->output[i].output_buffer,
753 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
754 so->output[i].register_index,
755 mask & 1 ? "x" : "",
756 mask & 2 ? "y" : "",
757 mask & 4 ? "z" : "",
758 mask & 8 ? "w" : "");
759 }
760 }
761
762 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
763 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
764 * or v4i32 (num_channels=3,4). */
765 static void build_tbuffer_store(struct si_shader_context *shader,
766 LLVMValueRef rsrc,
767 LLVMValueRef vdata,
768 unsigned num_channels,
769 LLVMValueRef vaddr,
770 LLVMValueRef soffset,
771 unsigned inst_offset,
772 unsigned dfmt,
773 unsigned nfmt,
774 unsigned offen,
775 unsigned idxen,
776 unsigned glc,
777 unsigned slc,
778 unsigned tfe)
779 {
780 struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
781 LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
/* Argument order matches the llvm.SI.tbuffer.store.* intrinsic:
 * rsrc, vdata, num_channels, vaddr, soffset, inst_offset, dfmt, nfmt,
 * then the offen/idxen/glc/slc/tfe flags. */
782 LLVMValueRef args[] = {
783 rsrc,
784 vdata,
785 LLVMConstInt(i32, num_channels, 0),
786 vaddr,
787 soffset,
788 LLVMConstInt(i32, inst_offset, 0),
789 LLVMConstInt(i32, dfmt, 0),
790 LLVMConstInt(i32, nfmt, 0),
791 LLVMConstInt(i32, offen, 0),
792 LLVMConstInt(i32, idxen, 0),
793 LLVMConstInt(i32, glc, 0),
794 LLVMConstInt(i32, slc, 0),
795 LLVMConstInt(i32, tfe, 0)
796 };
797 
798 /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
/* num_channels 3 and 4 both use the v4i32 suffix. */
799 unsigned func = CLAMP(num_channels, 1, 3) - 1;
800 const char *types[] = {"i32", "v2i32", "v4i32"};
801 char name[256];
802 snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
803 
804 lp_build_intrinsic(gallivm->builder, name,
805 LLVMVoidTypeInContext(gallivm->context),
806 args, Elements(args));
807 }
808
809 static void build_streamout_store(struct si_shader_context *shader,
810 LLVMValueRef rsrc,
811 LLVMValueRef vdata,
812 unsigned num_channels,
813 LLVMValueRef vaddr,
814 LLVMValueRef soffset,
815 unsigned inst_offset)
816 {
817 static unsigned dfmt[] = {
818 V_008F0C_BUF_DATA_FORMAT_32,
819 V_008F0C_BUF_DATA_FORMAT_32_32,
820 V_008F0C_BUF_DATA_FORMAT_32_32_32,
821 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
822 };
823 assert(num_channels >= 1 && num_channels <= 4);
824
825 build_tbuffer_store(shader, rsrc, vdata, num_channels, vaddr, soffset,
826 inst_offset, dfmt[num_channels-1],
827 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
828 }
829
830 /* On SI, the vertex shader is responsible for writing streamout data
831 * to buffers. */
/* Each thread handles one vertex; a thread only stores if its thread id
 * is below the vertex count the hardware passed in the streamout-config
 * SGPR.  Per enabled buffer, the write offset is
 * streamout_offset*4 + (write_index + tid)*stride*4, and each output's
 * components are packed into i32/v2i32/v4i32 and stored with
 * build_streamout_store(). */
832 static void si_llvm_emit_streamout(struct si_shader_context *shader)
833 {
834 struct pipe_stream_output_info *so = &shader->shader->selector->so;
835 struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
836 LLVMBuilderRef builder = gallivm->builder;
837 int i, j;
838 struct lp_build_if_state if_ctx;
839 
840 LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
841 
842 LLVMValueRef so_param =
843 LLVMGetParam(shader->radeon_bld.main_fn,
844 shader->param_streamout_config);
845 
846 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
847 LLVMValueRef so_vtx_count =
848 LLVMBuildAnd(builder,
849 LLVMBuildLShr(builder, so_param,
850 LLVMConstInt(i32, 16, 0), ""),
851 LLVMConstInt(i32, 127, 0), "");
852 
853 LLVMValueRef tid = build_intrinsic(builder, "llvm.SI.tid", i32,
854 NULL, 0, LLVMReadNoneAttribute);
855 
856 /* can_emit = tid < so_vtx_count; */
857 LLVMValueRef can_emit =
858 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
859 
860 /* Emit the streamout code conditionally. This actually avoids
861 * out-of-bounds buffer access. The hw tells us via the SGPR
862 * (so_vtx_count) which threads are allowed to emit streamout data. */
863 lp_build_if(&if_ctx, gallivm, can_emit);
864 {
865 /* The buffer offset is computed as follows:
866 * ByteOffset = streamout_offset[buffer_id]*4 +
867 * (streamout_write_index + thread_id)*stride[buffer_id] +
868 * attrib_offset
869 */
870 
871 LLVMValueRef so_write_index =
872 LLVMGetParam(shader->radeon_bld.main_fn,
873 shader->param_streamout_write_index);
874 
875 /* Compute (streamout_write_index + thread_id). */
876 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
877 
878 /* Compute the write offset for each enabled buffer. */
879 LLVMValueRef so_write_offset[4] = {};
880 for (i = 0; i < 4; i++) {
881 if (!so->stride[i])
882 continue;
883 
884 LLVMValueRef so_offset = LLVMGetParam(shader->radeon_bld.main_fn,
885 shader->param_streamout_offset[i]);
886 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(i32, 4, 0), "");
887 
888 so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
889 LLVMConstInt(i32, so->stride[i]*4, 0), "");
890 so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
891 }
892 
893 LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS] = shader->radeon_bld.soa.outputs;
894 
895 /* Write streamout data. */
896 for (i = 0; i < so->num_outputs; i++) {
897 unsigned buf_idx = so->output[i].output_buffer;
898 unsigned reg = so->output[i].register_index;
899 unsigned start = so->output[i].start_component;
900 unsigned num_comps = so->output[i].num_components;
901 LLVMValueRef out[4];
902 
/* Defensive: skip malformed outputs in release builds too. */
903 assert(num_comps && num_comps <= 4);
904 if (!num_comps || num_comps > 4)
905 continue;
906 
907 /* Load the output as int. */
908 for (j = 0; j < num_comps; j++) {
909 out[j] = LLVMBuildLoad(builder, outputs[reg][start+j], "");
910 out[j] = LLVMBuildBitCast(builder, out[j], i32, "");
911 }
912 
913 /* Pack the output. */
914 LLVMValueRef vdata = NULL;
915 
916 switch (num_comps) {
917 case 1: /* as i32 */
918 vdata = out[0];
919 break;
920 case 2: /* as v2i32 */
921 case 3: /* as v4i32 (aligned to 4) */
922 case 4: /* as v4i32 */
923 vdata = LLVMGetUndef(LLVMVectorType(i32, util_next_power_of_two(num_comps)));
924 for (j = 0; j < num_comps; j++) {
925 vdata = LLVMBuildInsertElement(builder, vdata, out[j],
926 LLVMConstInt(i32, j, 0), "");
927 }
928 break;
929 }
930 
931 build_streamout_store(shader, shader->so_buffers[buf_idx],
932 vdata, num_comps,
933 so_write_offset[buf_idx],
934 LLVMConstInt(i32, 0, 0),
935 so->output[i].dst_offset*4);
936 }
937 }
938 lp_build_endif(&if_ctx);
939 }
940
941
942 static int si_store_shader_io_attribs(struct si_shader *shader,
943 struct tgsi_full_declaration *d)
944 {
945 int i = -1;
946
947 switch (d->Declaration.File) {
948 case TGSI_FILE_INPUT:
949 i = shader->ninput++;
950 assert(i < Elements(shader->input));
951 shader->input[i].name = d->Semantic.Name;
952 shader->input[i].sid = d->Semantic.Index;
953 shader->input[i].interpolate = d->Interp.Interpolate;
954 shader->input[i].centroid = d->Interp.Centroid;
955 return -1;
956
957 case TGSI_FILE_OUTPUT:
958 i = shader->noutput++;
959 assert(i < Elements(shader->output));
960 shader->output[i].name = d->Semantic.Name;
961 shader->output[i].sid = d->Semantic.Index;
962 shader->output[i].index = d->Range.First;
963 shader->output[i].usage = d->Declaration.UsageMask;
964 break;
965 }
966
967 return i;
968 }
969
/* Generate export instructions for hardware VS shader stage.
 *
 * Routes each collected output to either a position export
 * (POS0..POS3) or a parameter export (PARAM0..), buffering position
 * exports in pos_args[] so the last one can carry the "done" bit.
 * PSIZE/EDGEFLAG/LAYER values are not exported directly; they are
 * collected and packed into the misc position vector (POS1).
 *
 * bld_base - TGSI->LLVM translation context
 * outputs  - values gathered by the VS epilogue, one entry per output
 * noutput  - number of entries in outputs[]
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL;
	unsigned semantic_name, semantic_index, semantic_usage;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	/* Emit transform-feedback stores before any exports. */
	if (si_shader_ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(si_shader_ctx);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].index;
		semantic_usage = outputs[i].usage;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc vector (POS1) below. */
			shader->vs_out_misc_write = true;
			shader->vs_out_point_size = true;
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc vector (POS1) below. */
			shader->vs_out_misc_write = true;
			shader->vs_out_edgeflag = true;
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Saved for the misc vector (POS1) below. */
			shader->vs_out_misc_write = true;
			shader->vs_out_layer = true;
			layer_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			shader->output[i].param_offset = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* Only export distances for enabled user clip planes. */
			if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
			      (1 << semantic_index)))
				continue;
			shader->clip_dist_write |=
				semantic_usage << (semantic_index << 2);
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			shader->output[i].param_offset = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			/* Position exports are buffered so targets can be
			 * renumbered and the last one flagged below. */
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		}

		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			/* Re-dispatch the same output as a GENERIC so the
			 * clip distances also get a parameter export. */
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->vs_out_misc_write) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->vs_out_point_size |
						      (shader->vs_out_edgeflag << 1) |
						      (shader->vs_out_layer << 2));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->vs_out_point_size)
			pos_args[1][5] = psize_value;

		if (shader->vs_out_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 bld_base->uint_bld.elem_type, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  base->elem_type, "");
		}

		if (shader->vs_out_layer)
			pos_args[1][7] = layer_value;
	}

	/* Count the buffered position exports. */
	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->nr_pos_exports++;

	/* Flush the buffered position exports with compacted targets. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder,
				   "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   pos_args[i], 9);
	}
}
1141
/* Export-shader (ES) epilogue: a vertex stage that feeds a geometry
 * shader.  Instead of exporting, each declared output is written to
 * the ESGS ring buffer so the GS stage can read it back later.
 */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_pipe_shader *shader = si_shader_ctx->shader;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_list;
	unsigned chan;
	int i;

	/* Collect the shader's I/O declarations into shader->shader. */
	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		si_store_shader_io_attribs(&shader->shader, d);
	}

	/* Load the ESGS ring resource descriptor */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
				    lp_build_const_int32(gallivm,
							 NUM_PIPE_CONST_BUFFERS + 1));

	/* Store each output channel as one dword: 4 consecutive dwords
	 * per output, offset by the per-wave ES2GS offset parameter. */
	for (i = 0; i < shader->shader.noutput; i++) {
		LLVMValueRef *out_ptr =
			si_shader_ctx->radeon_bld.soa.outputs[shader->shader.output[i].index];

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			LLVMValueRef voffset =
				lp_build_const_int32(gallivm, (4 * i + chan) * 4);
			LLVMValueRef soffset =
				LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					     SI_PARAM_ES2GS_OFFSET);

			/* The ring stores raw 32-bit uints. */
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, i32, "");

			build_tbuffer_store(si_shader_ctx, t_list, out_val, 1,
					    voffset, soffset, 0,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    1, 0, 1, 1, 0);
		}
	}
}
1194
1195 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
1196 {
1197 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1198 struct gallivm_state *gallivm = bld_base->base.gallivm;
1199 LLVMValueRef args[2];
1200
1201 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
1202 args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
1203 build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
1204 LLVMVoidTypeInContext(gallivm->context), args, 2,
1205 LLVMNoUnwindAttribute);
1206 }
1207
1208 static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context * bld_base)
1209 {
1210 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1211 struct gallivm_state *gallivm = bld_base->base.gallivm;
1212 struct si_pipe_shader *shader = si_shader_ctx->shader;
1213 struct tgsi_parse_context *parse = &si_shader_ctx->parse;
1214 struct si_shader_output_values *outputs = NULL;
1215 unsigned noutput = 0;
1216 int i;
1217
1218 while (!tgsi_parse_end_of_tokens(parse)) {
1219 struct tgsi_full_declaration *d =
1220 &parse->FullToken.FullDeclaration;
1221 unsigned index;
1222
1223 tgsi_parse_token(parse);
1224
1225 if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
1226 continue;
1227
1228 i = si_store_shader_io_attribs(&shader->shader, d);
1229 if (i < 0)
1230 continue;
1231
1232 outputs = REALLOC(outputs, noutput * sizeof(outputs[0]),
1233 (noutput + 1) * sizeof(outputs[0]));
1234 for (index = d->Range.First; index <= d->Range.Last; index++) {
1235 outputs[noutput].name = d->Semantic.Name;
1236 outputs[noutput].index = d->Semantic.Index;
1237 outputs[noutput].usage = d->Declaration.UsageMask;
1238
1239 for (i = 0; i < 4; i++)
1240 outputs[noutput].values[i] =
1241 LLVMBuildLoad(gallivm->builder,
1242 si_shader_ctx->radeon_bld.soa.outputs[index][i],
1243 "");
1244 }
1245 noutput++;
1246 }
1247
1248 si_llvm_export_vs(bld_base, outputs, noutput);
1249 FREE(outputs);
1250 }
1251
/* Fragment shader epilogue: emit color, depth and stencil exports.
 *
 * Color outputs go to MRT targets; POSITION/STENCIL outputs are
 * collected and exported together as MRTZ.  One export is always held
 * back in last_args[] so the final one emitted can carry the
 * valid-mask and "done" bits; if the shader exports nothing, a dummy
 * null export is synthesized.
 */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned semantic_name;
	int depth_index = -1, stencil_index = -1;
	int i;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;

		tgsi_parse_token(parse);

		/* Latch the "color0 writes all color buffers" property. */
		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		i = si_store_shader_io_attribs(shader, d);
		if (i < 0)
			continue;

		semantic_name = d->Semantic.Name;
		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(semantic_name) {
			case TGSI_SEMANTIC_POSITION:
				/* Exported later as part of MRTZ. */
				depth_index = index;
				continue;
			case TGSI_SEMANTIC_STENCIL:
				/* Exported later as part of MRTZ. */
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				target = V_008DFC_SQ_EXP_MRT + d->Semantic.Index;
				/* Force alpha to 1.0 when alpha-to-one is enabled. */
				if (si_shader_ctx->shader->key.ps.alpha_to_one)
					LLVMBuildStore(bld_base->base.gallivm->builder,
						       bld_base->base.one,
						       si_shader_ctx->radeon_bld.soa.outputs[index][3]);

				/* Alpha test only applies to color output 0. */
				if (d->Semantic.Index == 0 &&
				    si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
					si_alpha_test(bld_base,
						      si_shader_ctx->radeon_bld.soa.outputs[index]);
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled fs output type:%d\n",
					semantic_name);
			}

			si_llvm_init_export_args_load(bld_base,
						      si_shader_ctx->radeon_bld.soa.outputs[index],
						      target, args);

			if (semantic_name == TGSI_SEMANTIC_COLOR) {
				/* If there is an export instruction waiting to be emitted, do so now. */
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				/* This instruction will be emitted at the end of the shader. */
				memcpy(last_args, args, sizeof(args));

				/* Handle FS_COLOR0_WRITES_ALL_CBUFS. */
				if (shader->fs_write_all && shader->output[i].sid == 0 &&
				    si_shader_ctx->shader->key.ps.nr_cbufs > 1) {
					for (int c = 1; c < si_shader_ctx->shader->key.ps.nr_cbufs; c++) {
						si_llvm_init_export_args_load(bld_base,
									      si_shader_ctx->radeon_bld.soa.outputs[index],
									      V_008DFC_SQ_EXP_MRT + c, args);
						lp_build_intrinsic(base->gallivm->builder,
								   "llvm.SI.export",
								   LLVMVoidTypeInContext(base->gallivm->context),
								   args, 9);
					}
				}
			} else {
				/* Non-color exports are emitted immediately. */
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}
		}
	}

	/* Emit the combined depth/stencil export (MRTZ). */
	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			/* Depth comes from the Z channel of the POSITION output. */
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			/* Stencil comes from the Y channel of the STENCIL output. */
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			/* Only setting the stencil component bit (0x2) here
			 * breaks some stencil piglit tests
			 */
			mask |= 0x3;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		/* Keep one export pending so the final one gets the done bit. */
		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	/* Nothing was exported at all: synthesize a null MRT0 export. */
	if (!last_args[0]) {
		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5]= uint->zero;
		last_args[6]= uint->zero;
		last_args[7]= uint->zero;
		last_args[8]= uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
		si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = uint->one;

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);
}
1434
1435 static const struct lp_build_tgsi_action txf_action;
1436
1437 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1438 struct lp_build_tgsi_context * bld_base,
1439 struct lp_build_emit_data * emit_data);
1440
/* Build the argument list for a texture sample/fetch intrinsic.
 *
 * Packs the address components in hardware order (LOD bias, depth
 * compare value, derivatives, coordinates, LOD/sample index) into a
 * power-of-two i32 vector, then fills emit_data->args with
 * [address, resource, (sampler,) target] for build_tex_intrinsic().
 * Buffer textures bypass all of this and use the vertex-fetch path.
 * MSAA targets additionally remap the sample index through FMASK.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;
	unsigned chan;
	/* The sampler is always the last source operand. */
	unsigned sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	unsigned sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (target == TGSI_TEXTURE_BUFFER) {
		LLVMTypeRef i128 = LLVMIntTypeInContext(gallivm->context, 128);
		LLVMTypeRef v2i128 = LLVMVectorType(i128, 2);
		LLVMTypeRef i8 = LLVMInt8TypeInContext(gallivm->context);
		LLVMTypeRef v16i8 = LLVMVectorType(i8, 16);

		/* Truncate v32i8 to v16i8. */
		LLVMValueRef res = si_shader_ctx->resources[sampler_index];
		res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
		res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.zero, "");
		res = LLVMBuildBitCast(gallivm->builder, res, v16i8, "");

		/* args: [resource, attribute offset (0), element index] */
		emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
		emit_data->args[0] = res;
		emit_data->args[1] = bld_base->uint_bld.zero;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		/* TXP divides each coordinate by W. */
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		assert(ref_pos >= 0);
		address[count++] = coords[ref_pos];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		/* Shadow cube arrays carry the compare value in src1.x. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		for (chan = 0; chan < 2; chan++) {
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, chan);
			if (num_coords > 1)
				address[count++] = lp_build_emit_fetch(bld_base, inst, 2, chan);
		}
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The intrinsic takes the address as a vector of i32. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;

		memcpy(txf_address, address, sizeof(txf_address));

		/* Zero out the sample-index/LOD slots for the FMASK fetch. */
		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Pad to a power-of-two size. */
		while (txf_count < util_next_power_of_two(txf_count))
			txf_address[txf_count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

		/* Read FMASK using TXF. */
		txf_emit_data.chan = 0;
		txf_emit_data.dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context), 4);
		txf_emit_data.args[0] = lp_build_gather_values(gallivm, txf_address, txf_count);
		txf_emit_data.args[1] = si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index];
		txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm,
			target == TGSI_TEXTURE_2D_MSAA ? TGSI_TEXTURE_2D : TGSI_TEXTURE_2D_ARRAY);
		txf_emit_data.arg_count = 3;

		build_tex_intrinsic(&txf_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(uint_bld->elem_type, 4, 0);
		LLVMValueRef F = LLVMConstInt(uint_bld->elem_type, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		/* The sample index lives in Z for 2D MSAA, W for 2D array MSAA. */
		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid),
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder,
					 si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index],
					 LLVMVectorType(uint_bld->elem_type, 8), "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[sampler_index];

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset * off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			/* Cases cascade so higher-dimension targets add all
			 * applicable offset components. */
			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}

		/* TXF returns raw texels as v4i32 and takes no sampler. */
		emit_data->dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 3;
	} else {
		/* Sampler */
		emit_data->args[2] = si_shader_ctx->samplers[sampler_index];

		emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 4;
	}

	/* Dimensions */
	emit_data->args[emit_data->arg_count - 1] =
		lp_build_const_int32(bld_base->base.gallivm, target);

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}
1701
1702 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1703 struct lp_build_tgsi_context * bld_base,
1704 struct lp_build_emit_data * emit_data)
1705 {
1706 struct lp_build_context * base = &bld_base->base;
1707 char intr_name[127];
1708
1709 if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
1710 emit_data->output[emit_data->chan] = build_intrinsic(
1711 base->gallivm->builder,
1712 "llvm.SI.vs.load.input", emit_data->dst_type,
1713 emit_data->args, emit_data->arg_count,
1714 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1715 return;
1716 }
1717
1718 sprintf(intr_name, "%sv%ui32", action->intr_name,
1719 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
1720
1721 emit_data->output[emit_data->chan] = build_intrinsic(
1722 base->gallivm->builder, intr_name, emit_data->dst_type,
1723 emit_data->args, emit_data->arg_count,
1724 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1725 }
1726
1727 static void txq_fetch_args(
1728 struct lp_build_tgsi_context * bld_base,
1729 struct lp_build_emit_data * emit_data)
1730 {
1731 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1732 const struct tgsi_full_instruction *inst = emit_data->inst;
1733 struct gallivm_state *gallivm = bld_base->base.gallivm;
1734
1735 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
1736 LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
1737 LLVMTypeRef v8i32 = LLVMVectorType(i32, 8);
1738
1739 /* Read the size from the buffer descriptor directly. */
1740 LLVMValueRef size = si_shader_ctx->resources[inst->Src[1].Register.Index];
1741 size = LLVMBuildBitCast(gallivm->builder, size, v8i32, "");
1742 size = LLVMBuildExtractElement(gallivm->builder, size,
1743 lp_build_const_int32(gallivm, 2), "");
1744 emit_data->args[0] = size;
1745 return;
1746 }
1747
1748 /* Mip level */
1749 emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
1750
1751 /* Resource */
1752 emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];
1753
1754 /* Dimensions */
1755 emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
1756 inst->Texture.Texture);
1757
1758 emit_data->arg_count = 3;
1759
1760 emit_data->dst_type = LLVMVectorType(
1761 LLVMInt32TypeInContext(bld_base->base.gallivm->context),
1762 4);
1763 }
1764
1765 static void build_txq_intrinsic(const struct lp_build_tgsi_action * action,
1766 struct lp_build_tgsi_context * bld_base,
1767 struct lp_build_emit_data * emit_data)
1768 {
1769 if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
1770 /* Just return the buffer size. */
1771 emit_data->output[emit_data->chan] = emit_data->args[0];
1772 return;
1773 }
1774
1775 build_tgsi_intrinsic_nomem(action, bld_base, emit_data);
1776 }
1777
1778 #if HAVE_LLVM >= 0x0304
1779
/* Emit DDX/DDY screen-space derivatives via an LDS exchange.
 *
 * Each lane stores its fetched source value into the LDS slot indexed
 * by its thread id, then reads back the value of the base lane of its
 * 2x2 quad (tid & ~3) and of the lane at quad offset +1 (DDX) or +2
 * (DDY), and emits the difference.  Channels with a source swizzle
 * already computed for an earlier channel reuse that result instead of
 * repeating the LDS round trip.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMTypeRef i32;
	unsigned swizzle[4];
	unsigned c;

	i32 = LLVMInt32TypeInContext(gallivm->context);

	/* store_ptr: this lane's own LDS slot, indexed by thread id. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = build_intrinsic(gallivm->builder, "llvm.SI.tid", i32,
				     NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* load_ptr0: base lane of this lane's 2x2 quad (tid & ~3). */
	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, 0xfffffffc), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* load_ptr1: neighbour lane at quad offset +1 (DDX) or +2 (DDY). */
	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm,
						       opcode == TGSI_OPCODE_DDX ? 1 : 2),
				  "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		/* Reuse the result of an earlier channel with the same swizzle. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		/* Publish this lane's value to LDS (stored as i32 bits). */
		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, base->elem_type, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, base->elem_type, "");

		/* Derivative = neighbour - quad base. */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
1847
1848 #endif /* HAVE_LLVM >= 0x0304 */
1849
/* Emit one vertex from the geometry shader.
 *
 * Writes every declared output (4 dwords each) to the GSVS ring at
 * the position of the current vertex (gs_next_vertex), then sends a
 * GS_EMIT message for this wave.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_list;
	LLVMValueRef args[2];
	unsigned chan;
	int i;

	/* Load the GSVS ring resource descriptor */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
				    lp_build_const_int32(gallivm,
							 NUM_PIPE_CONST_BUFFERS + 2));

	/* Lazily collect the output declarations on the first emit. */
	if (shader->noutput == 0) {
		struct tgsi_parse_context *parse = &si_shader_ctx->parse;

		while (!tgsi_parse_end_of_tokens(parse)) {
			tgsi_parse_token(parse);

			if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION)
				si_store_shader_io_attribs(shader,
					&parse->FullToken.FullDeclaration);
		}
	}

	/* Write vertex attribute values to GSVS ring */
	for (i = 0; i < shader->noutput; i++) {
		LLVMValueRef *out_ptr =
			si_shader_ctx->radeon_bld.soa.outputs[shader->output[i].index];

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			/* Ring layout: each dword slot is strided by
			 * gs_max_out_vertices, indexed by the vertex. */
			LLVMValueRef voffset =
				lp_build_const_int32(gallivm,
						     ((i * 4 + chan) *
						      shader->gs_max_out_vertices +
						      si_shader_ctx->gs_next_vertex) * 4);
			LLVMValueRef soffset =
				LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					     SI_PARAM_GS2VS_OFFSET);

			/* The ring stores raw 32-bit uints. */
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, i32, "");

			build_tbuffer_store(si_shader_ctx, t_list, out_val, 1,
					    voffset, soffset, 0,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    1, 0, 1, 1, 0);
		}
	}
	/* NOTE(review): gs_next_vertex is not clamped against
	 * gs_max_out_vertices here — confirm callers cannot emit more
	 * vertices than declared. */
	si_shader_ctx->gs_next_vertex++;

	/* Signal vertex emission */
	args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS);
	args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
	build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
			LLVMVoidTypeInContext(gallivm->context), args, 2,
			LLVMNoUnwindAttribute);
}
1918
1919 /* Cut one primitive from the geometry shader */
1920 static void si_llvm_emit_primitive(
1921 const struct lp_build_tgsi_action *action,
1922 struct lp_build_tgsi_context *bld_base,
1923 struct lp_build_emit_data *emit_data)
1924 {
1925 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1926 struct gallivm_state *gallivm = bld_base->base.gallivm;
1927 LLVMValueRef args[2];
1928
1929 /* Signal primitive cut */
1930 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS);
1931 args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
1932 build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
1933 LLVMVoidTypeInContext(gallivm->context), args, 2,
1934 LLVMNoUnwindAttribute);
1935 }
1936
/* TGSI texture opcode -> SI sample intrinsic dispatch table.
 * Each action pairs the common argument-fetch helper with the name prefix
 * of the LLVM intrinsic that build_tex_intrinsic completes (the type
 * suffix is appended at emit time). */

/* TEX / TXP: plain (projected) texture sample */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

/* TXB: sample with LOD bias */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

#if HAVE_LLVM >= 0x0304
/* TXD: sample with explicit derivatives (needs LLVM >= 3.4) */
static const struct lp_build_tgsi_action txd_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampled."
};
#endif

/* TXF: texel fetch with integer coordinates */
static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};

/* TXL: sample with explicit LOD */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

/* TXQ: texture size / resource info query */
static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_txq_intrinsic,
	.intr_name = "llvm.SI.resinfo"
};
1974
1975 static void create_meta_data(struct si_shader_context *si_shader_ctx)
1976 {
1977 struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
1978 LLVMValueRef args[3];
1979
1980 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
1981 args[1] = 0;
1982 args[2] = lp_build_const_int32(gallivm, 1);
1983
1984 si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
1985 }
1986
/* Declare the LLVM entry point for the shader being compiled.
 *
 * Builds the hardware calling convention for the current processor type:
 * scalar (SGPR) parameters first, then per-thread vector (VGPR)
 * parameters.  SGPR parameters are marked "inreg" (or "byval" for the
 * descriptor arrays on LLVM >= 3.4) so the backend places them in scalar
 * registers.  Also reserves the LDS scratch needed by DDX/DDY, if used.
 */
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_pipe_shader *shader = si_shader_ctx->shader;
	LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
	unsigned i, last_sgpr, num_params;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	/* Descriptor arrays common to all shader stages: constant buffers
	 * (v16i8 resource descriptors), sampler views and sampler states
	 * (v32i8), all in the constant address space. */
	params[SI_PARAM_CONST] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 16), NUM_CONST_BUFFERS), CONST_ADDR_SPACE);
	/* We assume at most 16 textures per program at the moment.
	 * This probably needs to be changed to support bindless textures */
	params[SI_PARAM_SAMPLER] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 16), NUM_SAMPLER_VIEWS), CONST_ADDR_SPACE);
	params[SI_PARAM_RESOURCE] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 32), NUM_SAMPLER_STATES), CONST_ADDR_SPACE);

	switch (si_shader_ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_START_INSTANCE] = i32;
		num_params = SI_PARAM_START_INSTANCE+1;
		if (shader->key.vs.as_es) {
			/* VS-as-ES only needs the ESGS ring offset. */
			params[SI_PARAM_ES2GS_OFFSET] = i32;
			num_params++;
		} else {
			/* The locations of the other parameters are assigned dynamically. */

			/* Streamout SGPRs. */
			if (shader->selector->so.num_outputs) {
				params[si_shader_ctx->param_streamout_config = num_params++] = i32;
				params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
			}
			/* A streamout buffer offset is loaded if the stride is non-zero. */
			for (i = 0; i < 4; i++) {
				if (!shader->selector->so.stride[i])
					continue;

				params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
			}
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[si_shader_ctx->param_vertex_id = num_params++] = i32;
		params[num_params++] = i32; /* unused*/
		params[num_params++] = i32; /* unused */
		params[si_shader_ctx->param_instance_id = num_params++] = i32;
		break;

	case TGSI_PROCESSOR_GEOMETRY:
		params[SI_PARAM_GS2VS_OFFSET] = i32;
		params[SI_PARAM_GS_WAVE_ID] = i32;
		last_sgpr = SI_PARAM_GS_WAVE_ID;

		/* VGPRs */
		params[SI_PARAM_VTX0_OFFSET] = i32;
		params[SI_PARAM_VTX1_OFFSET] = i32;
		params[SI_PARAM_PRIMITIVE_ID] = i32;
		params[SI_PARAM_VTX2_OFFSET] = i32;
		params[SI_PARAM_VTX3_OFFSET] = i32;
		params[SI_PARAM_VTX4_OFFSET] = i32;
		params[SI_PARAM_VTX5_OFFSET] = i32;
		params[SI_PARAM_GS_INSTANCE_ID] = i32;
		num_params = SI_PARAM_GS_INSTANCE_ID+1;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = f32;
		params[SI_PARAM_PRIM_MASK] = i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		/* Interpolation (I,J) pairs and position VGPRs follow. */
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		num_params = SI_PARAM_POS_FIXED_PT+1;
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));
	radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, num_params);
	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);

	/* Mark every SGPR parameter so the backend assigns scalar registers. */
	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		switch (i) {
		default:
			LLVMAddAttribute(P, LLVMInRegAttribute);
			break;
#if HAVE_LLVM >= 0x0304
		/* We tell llvm that array inputs are passed by value to allow Sinking pass
		 * to move load. Inputs are constant so this is fine. */
		case SI_PARAM_CONST:
		case SI_PARAM_SAMPLER:
		case SI_PARAM_RESOURCE:
			LLVMAddAttribute(P, LLVMByValAttribute);
			break;
#endif
		}
	}

#if HAVE_LLVM >= 0x0304
	/* DDX/DDY exchange values between neighboring pixels through a
	 * 64-dword LDS scratch array; only allocate it when used. */
	if (bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0))
		si_shader_ctx->ddxy_lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);
#endif
}
2123
2124 static void preload_constants(struct si_shader_context *si_shader_ctx)
2125 {
2126 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
2127 struct gallivm_state * gallivm = bld_base->base.gallivm;
2128 const struct tgsi_shader_info * info = bld_base->info;
2129 unsigned buf;
2130 LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
2131
2132 for (buf = 0; buf < NUM_CONST_BUFFERS; buf++) {
2133 unsigned i, num_const = info->const_file_max[buf] + 1;
2134
2135 if (num_const == 0)
2136 continue;
2137
2138 /* Allocate space for the constant values */
2139 si_shader_ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));
2140
2141 /* Load the resource descriptor */
2142 si_shader_ctx->const_resource[buf] =
2143 build_indexed_load(si_shader_ctx, ptr, lp_build_const_int32(gallivm, buf));
2144
2145 /* Load the constants, we rely on the code sinking to do the rest */
2146 for (i = 0; i < num_const * 4; ++i) {
2147 LLVMValueRef args[2] = {
2148 si_shader_ctx->const_resource[buf],
2149 lp_build_const_int32(gallivm, i * 4)
2150 };
2151 si_shader_ctx->constants[buf][i] =
2152 build_intrinsic(gallivm->builder, "llvm.SI.load.const",
2153 bld_base->base.elem_type, args, 2,
2154 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
2155 }
2156 }
2157 }
2158
2159 static void preload_samplers(struct si_shader_context *si_shader_ctx)
2160 {
2161 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
2162 struct gallivm_state * gallivm = bld_base->base.gallivm;
2163 const struct tgsi_shader_info * info = bld_base->info;
2164
2165 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
2166
2167 LLVMValueRef res_ptr, samp_ptr;
2168 LLVMValueRef offset;
2169
2170 if (num_samplers == 0)
2171 return;
2172
2173 /* Allocate space for the values */
2174 si_shader_ctx->resources = CALLOC(NUM_SAMPLER_VIEWS, sizeof(LLVMValueRef));
2175 si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));
2176
2177 res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
2178 samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
2179
2180 /* Load the resources and samplers, we rely on the code sinking to do the rest */
2181 for (i = 0; i < num_samplers; ++i) {
2182 /* Resource */
2183 offset = lp_build_const_int32(gallivm, i);
2184 si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);
2185
2186 /* Sampler */
2187 offset = lp_build_const_int32(gallivm, i);
2188 si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);
2189
2190 /* FMASK resource */
2191 if (info->is_msaa_sampler[i]) {
2192 offset = lp_build_const_int32(gallivm, FMASK_TEX_OFFSET + i);
2193 si_shader_ctx->resources[FMASK_TEX_OFFSET + i] =
2194 build_indexed_load(si_shader_ctx, res_ptr, offset);
2195 }
2196 }
2197 }
2198
2199 static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx)
2200 {
2201 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
2202 struct gallivm_state * gallivm = bld_base->base.gallivm;
2203 unsigned i;
2204
2205 if (!si_shader_ctx->shader->selector->so.num_outputs)
2206 return;
2207
2208 LLVMValueRef buf_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
2209 SI_PARAM_SO_BUFFER);
2210
2211 /* Load the resources, we rely on the code sinking to do the rest */
2212 for (i = 0; i < 4; ++i) {
2213 if (si_shader_ctx->shader->selector->so.stride[i]) {
2214 LLVMValueRef offset = lp_build_const_int32(gallivm, i);
2215
2216 si_shader_ctx->so_buffers[i] = build_indexed_load(si_shader_ctx, buf_ptr, offset);
2217 }
2218 }
2219 }
2220
2221 int si_compile_llvm(struct si_context *sctx, struct si_pipe_shader *shader,
2222 LLVMModuleRef mod)
2223 {
2224 unsigned i;
2225 uint32_t *ptr;
2226 struct radeon_llvm_binary binary;
2227 bool dump = r600_can_dump_shader(&sctx->screen->b,
2228 shader->selector ? shader->selector->tokens : NULL);
2229 memset(&binary, 0, sizeof(binary));
2230 radeon_llvm_compile(mod, &binary,
2231 r600_get_llvm_processor_name(sctx->screen->b.family), dump);
2232 if (dump && ! binary.disassembled) {
2233 fprintf(stderr, "SI CODE:\n");
2234 for (i = 0; i < binary.code_size; i+=4 ) {
2235 fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
2236 binary.code[i + 2], binary.code[i + 1],
2237 binary.code[i]);
2238 }
2239 }
2240
2241 /* XXX: We may be able to emit some of these values directly rather than
2242 * extracting fields to be emitted later.
2243 */
2244 for (i = 0; i < binary.config_size; i+= 8) {
2245 unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
2246 unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
2247 switch (reg) {
2248 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
2249 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
2250 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
2251 case R_00B848_COMPUTE_PGM_RSRC1:
2252 shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
2253 shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
2254 break;
2255 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
2256 shader->lds_size = G_00B02C_EXTRA_LDS_SIZE(value);
2257 break;
2258 case R_00B84C_COMPUTE_PGM_RSRC2:
2259 shader->lds_size = G_00B84C_LDS_SIZE(value);
2260 break;
2261 case R_0286CC_SPI_PS_INPUT_ENA:
2262 shader->spi_ps_input_ena = value;
2263 break;
2264 default:
2265 fprintf(stderr, "Warning: Compiler emitted unknown "
2266 "config register: 0x%x\n", reg);
2267 break;
2268 }
2269 }
2270
2271 /* copy new shader */
2272 r600_resource_reference(&shader->bo, NULL);
2273 shader->bo = si_resource_create_custom(sctx->b.b.screen, PIPE_USAGE_IMMUTABLE,
2274 binary.code_size);
2275 if (shader->bo == NULL) {
2276 return -ENOMEM;
2277 }
2278
2279 ptr = (uint32_t*)sctx->b.ws->buffer_map(shader->bo->cs_buf, sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
2280 if (0 /*SI_BIG_ENDIAN*/) {
2281 for (i = 0; i < binary.code_size / 4; ++i) {
2282 ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
2283 }
2284 } else {
2285 memcpy(ptr, binary.code, binary.code_size);
2286 }
2287 sctx->b.ws->buffer_unmap(shader->bo->cs_buf);
2288
2289 free(binary.code);
2290 free(binary.config);
2291
2292 return 0;
2293 }
2294
2295 /* Generate code for the hardware VS shader stage to go with a geometry shader */
2296 static int si_generate_gs_copy_shader(struct si_context *sctx,
2297 struct si_shader_context *si_shader_ctx,
2298 bool dump)
2299 {
2300 struct gallivm_state *gallivm = &si_shader_ctx->radeon_bld.gallivm;
2301 struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
2302 struct lp_build_context *base = &bld_base->base;
2303 struct lp_build_context *uint = &bld_base->uint_bld;
2304 struct si_shader *gs = &si_shader_ctx->shader->selector->current->shader;
2305 struct si_shader_output_values *outputs;
2306 LLVMValueRef t_list_ptr, t_list;
2307 LLVMValueRef args[9];
2308 int i, r;
2309
2310 outputs = MALLOC(gs->noutput * sizeof(outputs[0]));
2311
2312 si_shader_ctx->type = TGSI_PROCESSOR_VERTEX;
2313 si_shader_ctx->gs_for_vs = gs;
2314
2315 radeon_llvm_context_init(&si_shader_ctx->radeon_bld);
2316
2317 create_meta_data(si_shader_ctx);
2318 create_function(si_shader_ctx);
2319 preload_streamout_buffers(si_shader_ctx);
2320
2321 /* Load the GSVS ring resource descriptor */
2322 t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
2323 t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
2324 lp_build_const_int32(gallivm,
2325 NUM_PIPE_CONST_BUFFERS + 1));
2326
2327 args[0] = t_list;
2328 args[1] = lp_build_mul_imm(uint,
2329 LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
2330 si_shader_ctx->param_vertex_id),
2331 4);
2332 args[3] = uint->zero;
2333 args[4] = uint->one; /* OFFEN */
2334 args[5] = uint->zero; /* IDXEN */
2335 args[6] = uint->one; /* GLC */
2336 args[7] = uint->one; /* SLC */
2337 args[8] = uint->zero; /* TFE */
2338
2339 /* Fetch vertex data from GSVS ring */
2340 for (i = 0; i < gs->noutput; ++i) {
2341 struct si_shader_output *out = gs->output + i;
2342 unsigned chan;
2343
2344 outputs[i].name = out->name;
2345 outputs[i].index = out->index;
2346 outputs[i].usage = out->usage;
2347
2348 for (chan = 0; chan < 4; chan++) {
2349 args[2] = lp_build_const_int32(gallivm,
2350 (i * 4 + chan) *
2351 gs->gs_max_out_vertices * 16 * 4);
2352
2353 outputs[i].values[chan] =
2354 LLVMBuildBitCast(gallivm->builder,
2355 build_intrinsic(gallivm->builder,
2356 "llvm.SI.buffer.load.dword.i32.i32",
2357 LLVMInt32TypeInContext(gallivm->context),
2358 args, 9,
2359 LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
2360 base->elem_type, "");
2361 }
2362 }
2363
2364 si_llvm_export_vs(bld_base, outputs, gs->noutput);
2365
2366 radeon_llvm_finalize_module(&si_shader_ctx->radeon_bld);
2367
2368 if (dump)
2369 fprintf(stderr, "Copy Vertex Shader for Geometry Shader:\n\n");
2370
2371 r = si_compile_llvm(sctx, si_shader_ctx->shader,
2372 bld_base->base.gallivm->module);
2373
2374 radeon_llvm_dispose(&si_shader_ctx->radeon_bld);
2375
2376 FREE(outputs);
2377 return r;
2378 }
2379
2380 int si_pipe_shader_create(
2381 struct pipe_context *ctx,
2382 struct si_pipe_shader *shader)
2383 {
2384 struct si_context *sctx = (struct si_context*)ctx;
2385 struct si_pipe_shader_selector *sel = shader->selector;
2386 struct si_shader_context si_shader_ctx;
2387 struct tgsi_shader_info shader_info;
2388 struct lp_build_tgsi_context * bld_base;
2389 LLVMModuleRef mod;
2390 int r = 0;
2391 bool dump = r600_can_dump_shader(&sctx->screen->b, shader->selector->tokens);
2392
2393 assert(shader->shader.noutput == 0);
2394 assert(shader->shader.ninterp == 0);
2395 assert(shader->shader.ninput == 0);
2396
2397 memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
2398 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
2399 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
2400
2401 tgsi_scan_shader(sel->tokens, &shader_info);
2402
2403 shader->shader.uses_kill = shader_info.uses_kill;
2404 shader->shader.uses_instanceid = shader_info.uses_instanceid;
2405 bld_base->info = &shader_info;
2406 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
2407
2408 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
2409 bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
2410 #if HAVE_LLVM >= 0x0304
2411 bld_base->op_actions[TGSI_OPCODE_TXD] = txd_action;
2412 #endif
2413 bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
2414 bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
2415 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
2416 bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;
2417
2418 #if HAVE_LLVM >= 0x0304
2419 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
2420 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
2421 #endif
2422
2423 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
2424 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
2425
2426 si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
2427 si_shader_ctx.tokens = sel->tokens;
2428 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
2429 si_shader_ctx.shader = shader;
2430 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
2431
2432 switch (si_shader_ctx.type) {
2433 case TGSI_PROCESSOR_VERTEX:
2434 si_shader_ctx.radeon_bld.load_input = declare_input_vs;
2435 if (shader->key.vs.as_es) {
2436 si_shader_ctx.gs_for_vs = &sctx->gs_shader->current->shader;
2437 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
2438 } else {
2439 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
2440 }
2441 break;
2442 case TGSI_PROCESSOR_GEOMETRY: {
2443 int i;
2444
2445 si_shader_ctx.radeon_bld.load_input = declare_input_gs;
2446 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
2447 bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
2448
2449 for (i = 0; i < shader_info.num_properties; i++) {
2450 switch (shader_info.properties[i].name) {
2451 case TGSI_PROPERTY_GS_INPUT_PRIM:
2452 shader->shader.gs_input_prim = shader_info.properties[i].data[0];
2453 break;
2454 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
2455 shader->shader.gs_output_prim = shader_info.properties[i].data[0];
2456 break;
2457 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
2458 shader->shader.gs_max_out_vertices = shader_info.properties[i].data[0];
2459 break;
2460 }
2461 }
2462 break;
2463 }
2464 case TGSI_PROCESSOR_FRAGMENT:
2465 si_shader_ctx.radeon_bld.load_input = declare_input_fs;
2466 bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
2467 break;
2468 default:
2469 assert(!"Unsupported shader type");
2470 return -1;
2471 }
2472
2473 create_meta_data(&si_shader_ctx);
2474 create_function(&si_shader_ctx);
2475 preload_constants(&si_shader_ctx);
2476 preload_samplers(&si_shader_ctx);
2477 preload_streamout_buffers(&si_shader_ctx);
2478
2479 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
2480 * conversion fails. */
2481 if (dump) {
2482 tgsi_dump(sel->tokens, 0);
2483 si_dump_streamout(&sel->so);
2484 }
2485
2486 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
2487 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
2488 goto out;
2489 }
2490
2491 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
2492
2493 mod = bld_base->base.gallivm->module;
2494 r = si_compile_llvm(sctx, shader, mod);
2495 if (r) {
2496 fprintf(stderr, "LLVM failed to compile shader\n");
2497 goto out;
2498 }
2499
2500 radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
2501
2502 if (si_shader_ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2503 shader->gs_copy_shader = CALLOC_STRUCT(si_pipe_shader);
2504 shader->gs_copy_shader->selector = shader->selector;
2505 si_shader_ctx.shader = shader->gs_copy_shader;
2506 if ((r = si_generate_gs_copy_shader(sctx, &si_shader_ctx, dump))) {
2507 free(shader->gs_copy_shader);
2508 shader->gs_copy_shader = NULL;
2509 goto out;
2510 }
2511 }
2512
2513 tgsi_parse_free(&si_shader_ctx.parse);
2514
2515 out:
2516 for (int i = 0; i < NUM_CONST_BUFFERS; i++)
2517 FREE(si_shader_ctx.constants[i]);
2518 FREE(si_shader_ctx.resources);
2519 FREE(si_shader_ctx.samplers);
2520
2521 return r;
2522 }
2523
/* Release the GPU code buffer held by a compiled shader variant.
 *
 * Drops the reference to the shader's buffer object; the struct itself is
 * owned and freed by the caller.  NOTE(review): gs_copy_shader (if any) is
 * not released here — confirm its owner frees it elsewhere. */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	r600_resource_reference(&shader->bo, NULL);
}