radeonsi: add the vertex shader position output if it's missing
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 /*
3 * Copyright 2012 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Tom Stellard <thomas.stellard@amd.com>
26 * Michel Dänzer <michel.daenzer@amd.com>
27 * Christian König <christian.koenig@amd.com>
28 */
29
30 #include "gallivm/lp_bld_tgsi_action.h"
31 #include "gallivm/lp_bld_const.h"
32 #include "gallivm/lp_bld_gather.h"
33 #include "gallivm/lp_bld_intr.h"
34 #include "gallivm/lp_bld_logic.h"
35 #include "gallivm/lp_bld_tgsi.h"
36 #include "gallivm/lp_bld_arit.h"
37 #include "gallivm/lp_bld_flow.h"
38 #include "radeon_llvm.h"
39 #include "radeon_llvm_emit.h"
40 #include "util/u_memory.h"
41 #include "tgsi/tgsi_info.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_scan.h"
44 #include "tgsi/tgsi_util.h"
45 #include "tgsi/tgsi_dump.h"
46
47 #include "radeonsi_pipe.h"
48 #include "radeonsi_shader.h"
49 #include "si_state.h"
50 #include "sid.h"
51
52 #include <assert.h>
53 #include <errno.h>
54 #include <stdio.h>
55
/* Per-compilation translator state.
 *
 * radeon_bld MUST remain the first member: si_shader_context() recovers
 * this struct by casting the lp_build_tgsi_context pointer embedded in
 * radeon_bld back to si_shader_context. */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	/* Shader-function parameter indices for streamout state
	 * (config SGPR, write index, per-buffer offsets). */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	/* Parameter indices of the vertex/instance ID VGPRs. */
	int param_vertex_id;
	int param_instance_id;
	LLVMValueRef const_md;       /* metadata node attached to constant loads
				      * (see build_indexed_load) */
	LLVMValueRef const_resource; /* constant-buffer resource descriptor */
#if HAVE_LLVM >= 0x0304
	LLVMValueRef ddxy_lds;       /* presumably LDS scratch for DDX/DDY — used
				      * outside this chunk; confirm at use site */
#endif
	LLVMValueRef *constants;     /* preloaded constant values, indexed by reg*4+chan */
	LLVMValueRef *resources;
	LLVMValueRef *samplers;
	LLVMValueRef so_buffers[4];  /* streamout buffer descriptors */
};
78
/* Recover the enclosing si_shader_context from a TGSI build context.
 * Valid only because radeon_bld is the first member of the struct, so
 * the two pointers share an address. */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx;

	ctx = (struct si_shader_context *)bld_base;
	return ctx;
}
84
85
/* Interpolation parameter bases/offsets — presumably indices into the
 * PS interpolant layout; their users are outside this chunk (verify). */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
/* NOTE(review): "OFSET" is a typo for "OFFSET"; kept as-is because
 * unseen code in this file may reference the misspelled name. */
#define CENTROID_OFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address-space numbers used by the AMDGPU backend. */
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8
97
/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is the
 * type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct si_shader_context * si_shader_ctx,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;

	/* GEP through the pointer: first index (0) selects the pointed-to
	 * object itself, the second is the caller's element offset. */
	LLVMValueRef indices[2] = {
		LLVMConstInt(LLVMInt64TypeInContext(base->gallivm->context), 0, false),
		offset
	};
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		base->gallivm->builder, base_ptr, indices, 2, "");

	LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
	/* Tag the load with the context's constant-load metadata (kind ID 1). */
	LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
	return result;
}
128
129 static LLVMValueRef get_instance_index_for_fetch(
130 struct radeon_llvm_context * radeon_bld,
131 unsigned divisor)
132 {
133 struct si_shader_context *si_shader_ctx =
134 si_shader_context(&radeon_bld->soa.bld_base);
135 struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
136
137 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
138 si_shader_ctx->param_instance_id);
139 result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
140 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
141
142 if (divisor > 1)
143 result = LLVMBuildUDiv(gallivm->builder, result,
144 lp_build_const_int32(gallivm, divisor), "");
145
146 return result;
147 }
148
/* Declare a vertex shader input: fetch the attribute as a vec4 with
 * llvm.SI.vs.load.input and scatter its four channels into the SoA
 * input slots for input_index.  Instanced attributes (non-zero divisor
 * in the shader key) are indexed by instance instead of by vertex. */
static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;

	/* Load the T list (vertex buffer descriptor for this attribute) */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		si_shader_ctx->shader->shader.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&si_shader_ctx->radeon_bld, divisor);
	} else {
		/* Load the buffer index, which is always stored in VGPR0
		 * for Vertex Shaders */
		buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					    si_shader_ctx->param_vertex_id);
	}

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}
207
/* Declare a fragment shader input.
 *
 * POSITION and FACE come from dedicated shader parameters.  Everything
 * else is interpolated via llvm.SI.fs.interp (or llvm.SI.fs.constant
 * for flat inputs), choosing the interpolation parameter from the
 * declaration's Interpolate/Centroid mode and the shader key
 * (flatshade, color_two_side). */
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	LLVMValueRef interp_param;
	const char * intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *                      quad begins a new primitive. Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 *
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		/* gl_FragCoord: x/y/z/w arrive as four float parameters. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		/* Map the hardware front-face value to (1, 0, 0, 1):
		 * x = 1.0 when face > 0 (front-facing), else 0.0. */
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	/* Assign the next interpolated-parameter slot to this input. */
	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* Colors are flat when flatshading; interp_param == 0
		 * selects the constant-interpolation intrinsic below. */
		if (si_shader_ctx->shader->key.ps.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->shader->key.ps.color_two_side) {
		/* Two-sided lighting: interpolate both the front and the
		 * back color (stored in the next parameter slot) and pick
		 * one per-fragment based on the front-face input. */
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		/* The back color consumed an extra interpolant slot. */
		shader->ninterp++;
	} else if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) {
		/* Fog: only .x is interpolated; .yzw are fixed to (0, 0, 1). */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			build_intrinsic(base->gallivm->builder, intr_name,
					input_type, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Generic case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
393
394 static void declare_input(
395 struct radeon_llvm_context * radeon_bld,
396 unsigned input_index,
397 const struct tgsi_full_declaration *decl)
398 {
399 struct si_shader_context * si_shader_ctx =
400 si_shader_context(&radeon_bld->soa.bld_base);
401 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
402 declare_input_vs(si_shader_ctx, input_index, decl);
403 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
404 declare_input_fs(si_shader_ctx, input_index, decl);
405 } else {
406 fprintf(stderr, "Warning: Unsupported shader type,\n");
407 }
408 }
409
410 static void declare_system_value(
411 struct radeon_llvm_context * radeon_bld,
412 unsigned index,
413 const struct tgsi_full_declaration *decl)
414 {
415 struct si_shader_context *si_shader_ctx =
416 si_shader_context(&radeon_bld->soa.bld_base);
417 LLVMValueRef value = 0;
418
419 switch (decl->Semantic.Name) {
420 case TGSI_SEMANTIC_INSTANCEID:
421 value = LLVMGetParam(radeon_bld->main_fn,
422 si_shader_ctx->param_instance_id);
423 break;
424
425 case TGSI_SEMANTIC_VERTEXID:
426 value = LLVMGetParam(radeon_bld->main_fn,
427 si_shader_ctx->param_vertex_id);
428 break;
429
430 default:
431 assert(!"unknown system value");
432 return;
433 }
434
435 radeon_bld->system_values[index] = value;
436 }
437
/* Fetch one channel of a constant-file source register (or, with
 * swizzle == LP_CHAN_ALL, all four gathered into a vector).
 *
 * Directly indexed constants come from the preloaded
 * si_shader_ctx->constants array; indirectly indexed ones are loaded at
 * runtime from the constant buffer via llvm.SI.load.const, with the
 * byte offset computed from the address register. */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned idx;

	LLVMValueRef args[2];
	LLVMValueRef addr;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Recurse per channel and gather into a single vector. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	/* Constants are preloaded as 4 dwords per register. */
	idx = reg->Register.Index * 4 + swizzle;
	if (!reg->Register.Indirect)
		return bitcast(bld_base, type, si_shader_ctx->constants[idx]);

	/* Indirect access: byte offset = (idx + addr_reg * 4) * 4. */
	args[0] = si_shader_ctx->const_resource;
	args[1] = lp_build_const_int32(base->gallivm, idx * 4);
	addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}
478
/* Initialize arguments for the shader export intrinsic.
 *
 * Fills args[0..8] for llvm.SI.export:
 *   args[0]    component write mask (hardcoded 0xf here)
 *   args[1]    whether EXEC represents the valid mask
 *   args[2]    "done" (last export) flag — caller may override
 *   args[3]    export target
 *   args[4]    COMPR flag (1 when packing two halves per dword)
 *   args[5..8] the four output values
 *
 * For fragment color targets this also accumulates the shader's
 * spi_shader_col_format and cb_shader_mask state. */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			/* The shader key decides per-colorbuffer whether to
			 * export 16-bit-per-channel packed values. */
			compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);

			si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++ ) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			/* args[0]/args[1] are reused as the two operands of
			 * llvm.SI.packf16 before the final export args are set. */
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++ ) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
568
569 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
570 unsigned index)
571 {
572 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
573 struct gallivm_state *gallivm = bld_base->base.gallivm;
574
575 if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
576 LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
577 LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
578 SI_PARAM_ALPHA_REF);
579
580 LLVMValueRef alpha_pass =
581 lp_build_cmp(&bld_base->base,
582 si_shader_ctx->shader->key.ps.alpha_func,
583 LLVMBuildLoad(gallivm->builder, out_ptr, ""),
584 alpha_ref);
585 LLVMValueRef arg =
586 lp_build_select(&bld_base->base,
587 alpha_pass,
588 lp_build_const_float(gallivm, 1.0f),
589 lp_build_const_float(gallivm, -1.0f));
590
591 build_intrinsic(gallivm->builder,
592 "llvm.AMDGPU.kill",
593 LLVMVoidTypeInContext(gallivm->context),
594 &arg, 1, 0);
595 } else {
596 build_intrinsic(gallivm->builder,
597 "llvm.AMDGPU.kilp",
598 LLVMVoidTypeInContext(gallivm->context),
599 NULL, 0, 0);
600 }
601 }
602
603 static void si_alpha_to_one(struct lp_build_tgsi_context *bld_base,
604 unsigned index)
605 {
606 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
607
608 /* set alpha to one */
609 LLVMBuildStore(bld_base->base.gallivm->builder,
610 bld_base->base.one,
611 si_shader_ctx->radeon_bld.soa.outputs[index][3]);
612 }
613
/* Compute clip distances from a CLIPVERTEX output.
 *
 * Loads the user clip planes via llvm.SI.load.const from the resource
 * at constant slot 1 and dots them with the output position, filling
 * the export-argument arrays pos[2]/pos[3] (targets POS+2/POS+3) for
 * each UCP register enabled in the shader key.  The caller emits the
 * actual exports. */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
				    LLVMValueRef (*pos)[9], unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct si_pipe_shader *shader = si_shader_ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef out_elts[4];
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);

	/* Load the four components of the clip vertex. */
	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
		out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
	}

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		if (!(shader->key.vs.ucps_enabled & (1 << reg_index)))
			continue;

		shader->shader.clip_dist_write |= 0xf << (4 * reg_index);

		/* Start each of the four distance accumulators at 0. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* args[0]/args[1] are temporarily reused as
				 * the llvm.SI.load.const operands; the real
				 * export args[0..4] are written below. */
				args[0] = const_resource;
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = build_intrinsic(base->gallivm->builder,
							   "llvm.SI.load.const",
							   base->elem_type,
							   args, 2,
							   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Export arguments: full write mask, ordinary export to
		 * POS+2+reg_index, COMPR off. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
674
675 static void si_dump_streamout(struct pipe_stream_output_info *so)
676 {
677 unsigned i;
678
679 if (so->num_outputs)
680 fprintf(stderr, "STREAMOUT\n");
681
682 for (i = 0; i < so->num_outputs; i++) {
683 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
684 so->output[i].start_component;
685 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
686 i, so->output[i].output_buffer,
687 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
688 so->output[i].register_index,
689 mask & 1 ? "x" : "",
690 mask & 2 ? "y" : "",
691 mask & 4 ? "z" : "",
692 mask & 8 ? "w" : "");
693 }
694 }
695
696 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
697 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
698 * or v4i32 (num_channels=3,4). */
699 static void build_tbuffer_store(struct si_shader_context *shader,
700 LLVMValueRef rsrc,
701 LLVMValueRef vdata,
702 unsigned num_channels,
703 LLVMValueRef vaddr,
704 LLVMValueRef soffset,
705 unsigned inst_offset,
706 unsigned dfmt,
707 unsigned nfmt,
708 unsigned offen,
709 unsigned idxen,
710 unsigned glc,
711 unsigned slc,
712 unsigned tfe)
713 {
714 struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
715 LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
716 LLVMValueRef args[] = {
717 rsrc,
718 vdata,
719 LLVMConstInt(i32, num_channels, 0),
720 vaddr,
721 soffset,
722 LLVMConstInt(i32, inst_offset, 0),
723 LLVMConstInt(i32, dfmt, 0),
724 LLVMConstInt(i32, nfmt, 0),
725 LLVMConstInt(i32, offen, 0),
726 LLVMConstInt(i32, idxen, 0),
727 LLVMConstInt(i32, glc, 0),
728 LLVMConstInt(i32, slc, 0),
729 LLVMConstInt(i32, tfe, 0)
730 };
731
732 /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
733 unsigned func = CLAMP(num_channels, 1, 3) - 1;
734 const char *types[] = {"i32", "v2i32", "v4i32"};
735 char name[256];
736 snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
737
738 lp_build_intrinsic(gallivm->builder, name,
739 LLVMVoidTypeInContext(gallivm->context),
740 args, Elements(args));
741 }
742
743 static void build_streamout_store(struct si_shader_context *shader,
744 LLVMValueRef rsrc,
745 LLVMValueRef vdata,
746 unsigned num_channels,
747 LLVMValueRef vaddr,
748 LLVMValueRef soffset,
749 unsigned inst_offset)
750 {
751 static unsigned dfmt[] = {
752 V_008F0C_BUF_DATA_FORMAT_32,
753 V_008F0C_BUF_DATA_FORMAT_32_32,
754 V_008F0C_BUF_DATA_FORMAT_32_32_32,
755 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
756 };
757 assert(num_channels >= 1 && num_channels <= 4);
758
759 build_tbuffer_store(shader, rsrc, vdata, num_channels, vaddr, soffset,
760 inst_offset, dfmt[num_channels-1],
761 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
762 }
763
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Emits, under a thread-count guard, one tbuffer store per enabled
 * stream output: the per-buffer byte offset is derived from the
 * streamout offset/write-index SGPRs and the thread id. */
static void si_llvm_emit_streamout(struct si_shader_context *shader)
{
	struct pipe_stream_output_info *so = &shader->shader->selector->so;
	struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);

	LLVMValueRef so_param =
		LLVMGetParam(shader->radeon_bld.main_fn,
			     shader->param_streamout_config);

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		LLVMBuildAnd(builder,
			     LLVMBuildLShr(builder, so_param,
					   LLVMConstInt(i32, 16, 0), ""),
			     LLVMConstInt(i32, 127, 0), "");

	LLVMValueRef tid = build_intrinsic(builder, "llvm.SI.tid", i32,
					   NULL, 0, LLVMReadNoneAttribute);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(shader->radeon_bld.main_fn,
				     shader->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			/* The offset SGPR counts dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(shader->radeon_bld.main_fn,
							      shader->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS] = shader->radeon_bld.soa.outputs;

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			LLVMValueRef out[4];

			/* Guard against malformed state objects in release
			 * builds; the assert catches them in debug builds. */
			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildLoad(builder, outputs[reg][start+j], "");
				out[j] = LLVMBuildBitCast(builder, out[j], i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(i32, j, 0), "");
				}
				break;
			}

			build_streamout_store(shader, shader->so_buffers[buf_idx],
					      vdata, num_comps,
					      so_write_offset[buf_idx],
					      LLVMConstInt(i32, 0, 0),
					      so->output[i].dst_offset*4);
		}
	}
	lp_build_endif(&if_ctx);
}
874
875 /* XXX: This is partially implemented for VS only at this point. It is not complete */
876 static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
877 {
878 struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
879 struct si_shader * shader = &si_shader_ctx->shader->shader;
880 struct lp_build_context * base = &bld_base->base;
881 struct lp_build_context * uint =
882 &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
883 struct tgsi_parse_context *parse = &si_shader_ctx->parse;
884 LLVMValueRef args[9];
885 LLVMValueRef last_args[9] = { 0 };
886 LLVMValueRef pos_args[4][9] = { { 0 } };
887 unsigned semantic_name;
888 unsigned param_count = 0;
889 int depth_index = -1, stencil_index = -1;
890 int i;
891
892 if (si_shader_ctx->shader->selector->so.num_outputs) {
893 si_llvm_emit_streamout(si_shader_ctx);
894 }
895
896 while (!tgsi_parse_end_of_tokens(parse)) {
897 struct tgsi_full_declaration *d =
898 &parse->FullToken.FullDeclaration;
899 unsigned target;
900 unsigned index;
901
902 tgsi_parse_token(parse);
903
904 if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
905 parse->FullToken.FullProperty.Property.PropertyName ==
906 TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
907 shader->fs_write_all = TRUE;
908
909 if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
910 continue;
911
912 switch (d->Declaration.File) {
913 case TGSI_FILE_INPUT:
914 i = shader->ninput++;
915 assert(i < Elements(shader->input));
916 shader->input[i].name = d->Semantic.Name;
917 shader->input[i].sid = d->Semantic.Index;
918 shader->input[i].interpolate = d->Interp.Interpolate;
919 shader->input[i].centroid = d->Interp.Centroid;
920 continue;
921
922 case TGSI_FILE_OUTPUT:
923 i = shader->noutput++;
924 assert(i < Elements(shader->output));
925 shader->output[i].name = d->Semantic.Name;
926 shader->output[i].sid = d->Semantic.Index;
927 shader->output[i].interpolate = d->Interp.Interpolate;
928 break;
929
930 default:
931 continue;
932 }
933
934 semantic_name = d->Semantic.Name;
935 handle_semantic:
936 for (index = d->Range.First; index <= d->Range.Last; index++) {
937 /* Select the correct target */
938 switch(semantic_name) {
939 case TGSI_SEMANTIC_PSIZE:
940 shader->vs_out_misc_write = 1;
941 shader->vs_out_point_size = 1;
942 target = V_008DFC_SQ_EXP_POS + 1;
943 break;
944 case TGSI_SEMANTIC_POSITION:
945 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
946 target = V_008DFC_SQ_EXP_POS;
947 break;
948 } else {
949 depth_index = index;
950 continue;
951 }
952 case TGSI_SEMANTIC_STENCIL:
953 stencil_index = index;
954 continue;
955 case TGSI_SEMANTIC_COLOR:
956 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
957 case TGSI_SEMANTIC_BCOLOR:
958 target = V_008DFC_SQ_EXP_PARAM + param_count;
959 shader->output[i].param_offset = param_count;
960 param_count++;
961 } else {
962 target = V_008DFC_SQ_EXP_MRT + shader->output[i].sid;
963 if (si_shader_ctx->shader->key.ps.alpha_to_one) {
964 si_alpha_to_one(bld_base, index);
965 }
966 if (shader->output[i].sid == 0 &&
967 si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
968 si_alpha_test(bld_base, index);
969 }
970 break;
971 case TGSI_SEMANTIC_CLIPDIST:
972 if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
973 (1 << d->Semantic.Index)))
974 continue;
975 shader->clip_dist_write |=
976 d->Declaration.UsageMask << (d->Semantic.Index << 2);
977 target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
978 break;
979 case TGSI_SEMANTIC_CLIPVERTEX:
980 si_llvm_emit_clipvertex(bld_base, pos_args, index);
981 continue;
982 case TGSI_SEMANTIC_FOG:
983 case TGSI_SEMANTIC_GENERIC:
984 target = V_008DFC_SQ_EXP_PARAM + param_count;
985 shader->output[i].param_offset = param_count;
986 param_count++;
987 break;
988 default:
989 target = 0;
990 fprintf(stderr,
991 "Warning: SI unhandled output type:%d\n",
992 semantic_name);
993 }
994
995 si_llvm_init_export_args(bld_base, d, index, target, args);
996
997 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX &&
998 target >= V_008DFC_SQ_EXP_POS &&
999 target <= (V_008DFC_SQ_EXP_POS + 3)) {
1000 memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
1001 args, sizeof(args));
1002 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT &&
1003 semantic_name == TGSI_SEMANTIC_COLOR) {
1004 if (last_args[0]) {
1005 lp_build_intrinsic(base->gallivm->builder,
1006 "llvm.SI.export",
1007 LLVMVoidTypeInContext(base->gallivm->context),
1008 last_args, 9);
1009 }
1010
1011 memcpy(last_args, args, sizeof(args));
1012 } else {
1013 lp_build_intrinsic(base->gallivm->builder,
1014 "llvm.SI.export",
1015 LLVMVoidTypeInContext(base->gallivm->context),
1016 args, 9);
1017 }
1018
1019 }
1020
1021 if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
1022 semantic_name = TGSI_SEMANTIC_GENERIC;
1023 goto handle_semantic;
1024 }
1025 }
1026
1027 if (depth_index >= 0 || stencil_index >= 0) {
1028 LLVMValueRef out_ptr;
1029 unsigned mask = 0;
1030
1031 /* Specify the target we are exporting */
1032 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);
1033
1034 if (depth_index >= 0) {
1035 out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
1036 args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
1037 mask |= 0x1;
1038
1039 if (stencil_index < 0) {
1040 args[6] =
1041 args[7] =
1042 args[8] = args[5];
1043 }
1044 }
1045
1046 if (stencil_index >= 0) {
1047 out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
1048 args[7] =
1049 args[8] =
1050 args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
1051 /* Only setting the stencil component bit (0x2) here
1052 * breaks some stencil piglit tests
1053 */
1054 mask |= 0x3;
1055
1056 if (depth_index < 0)
1057 args[5] = args[6];
1058 }
1059
1060 /* Specify which components to enable */
1061 args[0] = lp_build_const_int32(base->gallivm, mask);
1062
1063 args[1] =
1064 args[2] =
1065 args[4] = uint->zero;
1066
1067 if (last_args[0])
1068 lp_build_intrinsic(base->gallivm->builder,
1069 "llvm.SI.export",
1070 LLVMVoidTypeInContext(base->gallivm->context),
1071 args, 9);
1072 else
1073 memcpy(last_args, args, sizeof(args));
1074 }
1075
1076 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
1077 unsigned pos_idx = 0;
1078
1079 /* We need to add the position output manually if it's missing. */
1080 if (!pos_args[0][0]) {
1081 pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1082 pos_args[0][1] = uint->zero; /* EXEC mask */
1083 pos_args[0][2] = uint->zero; /* last export? */
1084 pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
1085 pos_args[0][4] = uint->zero; /* COMPR flag */
1086 pos_args[0][5] = base->zero; /* X */
1087 pos_args[0][6] = base->zero; /* Y */
1088 pos_args[0][7] = base->zero; /* Z */
1089 pos_args[0][8] = base->one; /* W */
1090 }
1091
1092 for (i = 0; i < 4; i++)
1093 if (pos_args[i][0])
1094 shader->nr_pos_exports++;
1095
1096 for (i = 0; i < 4; i++) {
1097 if (!pos_args[i][0])
1098 continue;
1099
1100 /* Specify the target we are exporting */
1101 pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);
1102
1103 if (pos_idx == shader->nr_pos_exports)
1104 /* Specify that this is the last export */
1105 pos_args[i][2] = uint->one;
1106
1107 lp_build_intrinsic(base->gallivm->builder,
1108 "llvm.SI.export",
1109 LLVMVoidTypeInContext(base->gallivm->context),
1110 pos_args[i], 9);
1111 }
1112 } else {
1113 if (!last_args[0]) {
1114 /* Specify which components to enable */
1115 last_args[0] = lp_build_const_int32(base->gallivm, 0x0);
1116
1117 /* Specify the target we are exporting */
1118 last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
1119
1120 /* Set COMPR flag to zero to export data as 32-bit */
1121 last_args[4] = uint->zero;
1122
1123 /* dummy bits */
1124 last_args[5]= uint->zero;
1125 last_args[6]= uint->zero;
1126 last_args[7]= uint->zero;
1127 last_args[8]= uint->zero;
1128
1129 si_shader_ctx->shader->spi_shader_col_format |=
1130 V_028714_SPI_SHADER_32_ABGR;
1131 si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
1132 }
1133
1134 /* Specify whether the EXEC mask represents the valid mask */
1135 last_args[1] = uint->one;
1136
1137 if (shader->fs_write_all && shader->nr_cbufs > 1) {
1138 int i;
1139
1140 /* Specify that this is not yet the last export */
1141 last_args[2] = lp_build_const_int32(base->gallivm, 0);
1142
1143 for (i = 1; i < shader->nr_cbufs; i++) {
1144 /* Specify the target we are exporting */
1145 last_args[3] = lp_build_const_int32(base->gallivm,
1146 V_008DFC_SQ_EXP_MRT + i);
1147
1148 lp_build_intrinsic(base->gallivm->builder,
1149 "llvm.SI.export",
1150 LLVMVoidTypeInContext(base->gallivm->context),
1151 last_args, 9);
1152
1153 si_shader_ctx->shader->spi_shader_col_format |=
1154 si_shader_ctx->shader->spi_shader_col_format << 4;
1155 si_shader_ctx->shader->cb_shader_mask |=
1156 si_shader_ctx->shader->cb_shader_mask << 4;
1157 }
1158
1159 last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
1160 }
1161
1162 /* Specify that this is the last export */
1163 last_args[2] = lp_build_const_int32(base->gallivm, 1);
1164
1165 lp_build_intrinsic(base->gallivm->builder,
1166 "llvm.SI.export",
1167 LLVMVoidTypeInContext(base->gallivm->context),
1168 last_args, 9);
1169 }
1170 /* XXX: Look up what this function does */
1171 /* ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
1172 }
1173
1174 static const struct lp_build_tgsi_action txf_action;
1175
1176 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1177 struct lp_build_tgsi_context * bld_base,
1178 struct lp_build_emit_data * emit_data);
1179
/* Gather and pack all operands of a TGSI texture opcode into the argument
 * list of the matching llvm.SI.* sample/imageload intrinsic.
 *
 * The hardware address vector is assembled into address[] in a fixed order:
 * LOD bias (TXB), depth-compare reference, user derivatives (TXD),
 * coordinates, then LOD/sample index (TXL/TXF); finally it is padded to a
 * power-of-two element count.  For MSAA targets the sample index is
 * remapped through the FMASK surface first.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src, sampler_index;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;		/* number of dwords packed into address[] */
	unsigned chan;

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		/* TXP divides each coordinate by W before sampling. */
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	/* After projection W is logically 1.0. */
	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		assert(ref_pos >= 0);
		address[count++] = coords[ref_pos];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		/* Cube-array shadow: the reference value lives in src1.x. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack user derivatives (TXD: src1 = ddx, src2 = ddy, interleaved) */
	if (opcode == TGSI_OPCODE_TXD) {
		for (chan = 0; chan < 2; chan++) {
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, chan);
			if (num_coords > 1)
				address[count++] = lp_build_emit_fetch(bld_base, inst, 2, chan);
		}
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The intrinsics take the address as an i32 vector. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* The sampler operand is always the last source register. */
	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;

		memcpy(txf_address, address, sizeof(txf_address));

		/* FMASK is read at LOD 0 (and layer 0 for plain 2D MSAA). */
		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Pad to a power-of-two size. */
		while (txf_count < util_next_power_of_two(txf_count))
			txf_address[txf_count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

		/* Read FMASK using TXF. */
		txf_emit_data.chan = 0;
		txf_emit_data.dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context), 4);
		txf_emit_data.args[0] = lp_build_gather_values(gallivm, txf_address, txf_count);
		txf_emit_data.args[1] = si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index];
		txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm,
			target == TGSI_TEXTURE_2D_MSAA ? TGSI_TEXTURE_2D : TGSI_TEXTURE_2D_ARRAY);
		txf_emit_data.arg_count = 3;

		build_tex_intrinsic(&txf_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(uint_bld->elem_type, 4, 0);
		LLVMValueRef F = LLVMConstInt(uint_bld->elem_type, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		/* Channel holding the sample index within address[]. */
		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid),
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder,
					 si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index],
					 LLVMVectorType(uint_bld->elem_type, 8), "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[sampler_index];

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset * off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			/* Each case adds the offset for its axis, then falls
			 * through to the lower-dimension axes. */
			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}

		/* TXF returns raw integer texels and takes no sampler state. */
		emit_data->dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 3;
	} else {
		/* Sampler */
		emit_data->args[2] = si_shader_ctx->samplers[sampler_index];

		emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 4;
	}

	/* Dimensions */
	emit_data->args[emit_data->arg_count - 1] =
		lp_build_const_int32(bld_base->base.gallivm, target);

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}
1422
1423 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1424 struct lp_build_tgsi_context * bld_base,
1425 struct lp_build_emit_data * emit_data)
1426 {
1427 struct lp_build_context * base = &bld_base->base;
1428 char intr_name[23];
1429
1430 sprintf(intr_name, "%sv%ui32", action->intr_name,
1431 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
1432
1433 emit_data->output[emit_data->chan] = build_intrinsic(
1434 base->gallivm->builder, intr_name, emit_data->dst_type,
1435 emit_data->args, emit_data->arg_count,
1436 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1437 }
1438
1439 static void txq_fetch_args(
1440 struct lp_build_tgsi_context * bld_base,
1441 struct lp_build_emit_data * emit_data)
1442 {
1443 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1444 const struct tgsi_full_instruction *inst = emit_data->inst;
1445
1446 /* Mip level */
1447 emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
1448
1449 /* Resource */
1450 emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];
1451
1452 /* Dimensions */
1453 emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
1454 inst->Texture.Texture);
1455
1456 emit_data->arg_count = 3;
1457
1458 emit_data->dst_type = LLVMVectorType(
1459 LLVMInt32TypeInContext(bld_base->base.gallivm->context),
1460 4);
1461 }
1462
1463 #if HAVE_LLVM >= 0x0304
1464
/* Emit DDX/DDY screen-space derivatives.
 *
 * Each lane writes its source value to a per-thread LDS slot, then reads
 * back the slot of the first lane of its group of 4 (tid & ~3) and the
 * slot offset by 1 (DDX) or 2 (DDY); the derivative is the difference of
 * the two.  Assumes the usual 2x2 quad lane layout — TODO confirm against
 * the hardware's pixel-to-lane mapping.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMTypeRef i32;
	unsigned swizzle[4];
	unsigned c;

	i32 = LLVMInt32TypeInContext(gallivm->context);

	/* This lane's own LDS slot, indexed by thread id. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = build_intrinsic(gallivm->builder, "llvm.SI.tid", i32,
				     NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* Slot of the first lane in this group of 4 (tid & ~3). */
	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, 0xfffffffc), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* Neighbor slot: +1 for DDX, +2 for DDY. */
	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm,
						       opcode == TGSI_OPCODE_DDX ? 1 : 2),
				  "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		/* Reuse the result of an earlier channel with the same swizzle. */
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		/* Publish this lane's value via LDS. */
		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, base->elem_type, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, base->elem_type, "");

		/* derivative = neighbor - top-left */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
1532
1533 #endif /* HAVE_LLVM >= 0x0304 */
1534
/* TGSI texture opcode actions: each pairs the shared argument-packing
 * callback with the llvm.SI.* intrinsic name prefix it should emit
 * (registered against the opcodes in si_pipe_shader_create).
 */

/* TEX/TXP: basic (and projected) sampling. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

/* TXB: sampling with an LOD bias. */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

#if HAVE_LLVM >= 0x0304
/* TXD: sampling with user-supplied derivatives. */
static const struct lp_build_tgsi_action txd_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampled."
};
#endif

/* TXF: direct (unfiltered) texel fetch; also used for FMASK reads. */
static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};

/* TXL: sampling with an explicit LOD. */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

/* TXQ: resource info query (no vector-width mangling). */
static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.SI.resinfo"
};
1572
1573 static void create_meta_data(struct si_shader_context *si_shader_ctx)
1574 {
1575 struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
1576 LLVMValueRef args[3];
1577
1578 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
1579 args[1] = 0;
1580 args[2] = lp_build_const_int32(gallivm, 1);
1581
1582 si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
1583 }
1584
/* Declare the LLVM function for the shader and describe its calling
 * convention: the leading parameters are SGPR-loaded descriptor pointers
 * and constants (marked inreg), the trailing ones are per-thread VGPR
 * system values.  Also allocates the LDS array used by DDX/DDY when the
 * shader needs derivatives.
 */
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
	unsigned i, last_sgpr, num_params;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	/* Descriptor arrays passed as pointers in the constant address space. */
	params[SI_PARAM_CONST] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 16), NUM_CONST_BUFFERS), CONST_ADDR_SPACE);
	/* We assume at most 16 textures per program at the moment.
	 * This need probably need to be changed to support bindless textures */
	params[SI_PARAM_SAMPLER] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 16), NUM_SAMPLER_VIEWS), CONST_ADDR_SPACE);
	params[SI_PARAM_RESOURCE] = LLVMPointerType(
		LLVMArrayType(LLVMVectorType(i8, 32), NUM_SAMPLER_STATES), CONST_ADDR_SPACE);

	switch (si_shader_ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_START_INSTANCE] = i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* The locations of the other parameters are assigned dynamically. */

		/* Streamout SGPRs. */
		if (si_shader_ctx->shader->selector->so.num_outputs) {
			params[si_shader_ctx->param_streamout_config = num_params++] = i32;
			params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
		}
		/* A streamout buffer offset is loaded if the stride is non-zero. */
		for (i = 0; i < 4; i++) {
			if (!si_shader_ctx->shader->selector->so.stride[i])
				continue;

			params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[si_shader_ctx->param_vertex_id = num_params++] = i32;
		params[num_params++] = i32; /* unused*/
		params[num_params++] = i32; /* unused */
		params[si_shader_ctx->param_instance_id = num_params++] = i32;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		/* Fixed fragment-shader layout: two SGPRs, then the
		 * interpolation and position VGPR inputs. */
		params[SI_PARAM_ALPHA_REF] = f32;
		params[SI_PARAM_PRIM_MASK] = i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		num_params = SI_PARAM_POS_FIXED_PT+1;
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));
	radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, num_params);
	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);

	/* Mark all SGPR parameters as inreg (uniform across the wave). */
	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		switch (i) {
		default:
			LLVMAddAttribute(P, LLVMInRegAttribute);
			break;
#if HAVE_LLVM >= 0x0304
		/* We tell llvm that array inputs are passed by value to allow Sinking pass
		 * to move load. Inputs are constant so this is fine. */
		case SI_PARAM_CONST:
		case SI_PARAM_SAMPLER:
		case SI_PARAM_RESOURCE:
			LLVMAddAttribute(P, LLVMByValAttribute);
			break;
#endif
		}
	}

#if HAVE_LLVM >= 0x0304
	/* Scratch LDS for the DDX/DDY lane exchange (see si_llvm_emit_ddxy). */
	if (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	    bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0)
		si_shader_ctx->ddxy_lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);
#endif
}
1698
1699 static void preload_constants(struct si_shader_context *si_shader_ctx)
1700 {
1701 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1702 struct gallivm_state * gallivm = bld_base->base.gallivm;
1703 const struct tgsi_shader_info * info = bld_base->info;
1704
1705 unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;
1706
1707 LLVMValueRef ptr;
1708
1709 if (num_const == 0)
1710 return;
1711
1712 /* Allocate space for the constant values */
1713 si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));
1714
1715 /* Load the resource descriptor */
1716 ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
1717 si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);
1718
1719 /* Load the constants, we rely on the code sinking to do the rest */
1720 for (i = 0; i < num_const * 4; ++i) {
1721 LLVMValueRef args[2] = {
1722 si_shader_ctx->const_resource,
1723 lp_build_const_int32(gallivm, i * 4)
1724 };
1725 si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
1726 bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1727 }
1728 }
1729
1730 static void preload_samplers(struct si_shader_context *si_shader_ctx)
1731 {
1732 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1733 struct gallivm_state * gallivm = bld_base->base.gallivm;
1734 const struct tgsi_shader_info * info = bld_base->info;
1735
1736 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
1737
1738 LLVMValueRef res_ptr, samp_ptr;
1739 LLVMValueRef offset;
1740
1741 if (num_samplers == 0)
1742 return;
1743
1744 /* Allocate space for the values */
1745 si_shader_ctx->resources = CALLOC(NUM_SAMPLER_VIEWS, sizeof(LLVMValueRef));
1746 si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));
1747
1748 res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
1749 samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
1750
1751 /* Load the resources and samplers, we rely on the code sinking to do the rest */
1752 for (i = 0; i < num_samplers; ++i) {
1753 /* Resource */
1754 offset = lp_build_const_int32(gallivm, i);
1755 si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);
1756
1757 /* Sampler */
1758 offset = lp_build_const_int32(gallivm, i);
1759 si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);
1760
1761 /* FMASK resource */
1762 if (info->is_msaa_sampler[i]) {
1763 offset = lp_build_const_int32(gallivm, FMASK_TEX_OFFSET + i);
1764 si_shader_ctx->resources[FMASK_TEX_OFFSET + i] =
1765 build_indexed_load(si_shader_ctx, res_ptr, offset);
1766 }
1767 }
1768 }
1769
1770 static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx)
1771 {
1772 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1773 struct gallivm_state * gallivm = bld_base->base.gallivm;
1774 unsigned i;
1775
1776 if (!si_shader_ctx->shader->selector->so.num_outputs)
1777 return;
1778
1779 LLVMValueRef buf_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
1780 SI_PARAM_SO_BUFFER);
1781
1782 /* Load the resources, we rely on the code sinking to do the rest */
1783 for (i = 0; i < 4; ++i) {
1784 if (si_shader_ctx->shader->selector->so.stride[i]) {
1785 LLVMValueRef offset = lp_build_const_int32(gallivm, i);
1786
1787 si_shader_ctx->so_buffers[i] = build_indexed_load(si_shader_ctx, buf_ptr, offset);
1788 }
1789 }
1790 }
1791
1792 int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
1793 LLVMModuleRef mod)
1794 {
1795 unsigned i;
1796 uint32_t *ptr;
1797 struct radeon_llvm_binary binary;
1798 bool dump = r600_can_dump_shader(&rctx->screen->b,
1799 shader->selector ? shader->selector->tokens : NULL);
1800 memset(&binary, 0, sizeof(binary));
1801 radeon_llvm_compile(mod, &binary,
1802 r600_get_llvm_processor_name(rctx->screen->b.family), dump);
1803 if (dump && ! binary.disassembled) {
1804 fprintf(stderr, "SI CODE:\n");
1805 for (i = 0; i < binary.code_size; i+=4 ) {
1806 fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
1807 binary.code[i + 2], binary.code[i + 1],
1808 binary.code[i]);
1809 }
1810 }
1811
1812 /* XXX: We may be able to emit some of these values directly rather than
1813 * extracting fields to be emitted later.
1814 */
1815 for (i = 0; i < binary.config_size; i+= 8) {
1816 unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
1817 unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
1818 switch (reg) {
1819 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
1820 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
1821 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
1822 case R_00B848_COMPUTE_PGM_RSRC1:
1823 shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
1824 shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
1825 break;
1826 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
1827 shader->lds_size = G_00B02C_EXTRA_LDS_SIZE(value);
1828 break;
1829 case R_00B84C_COMPUTE_PGM_RSRC2:
1830 shader->lds_size = G_00B84C_LDS_SIZE(value);
1831 break;
1832 case R_0286CC_SPI_PS_INPUT_ENA:
1833 shader->spi_ps_input_ena = value;
1834 break;
1835 default:
1836 fprintf(stderr, "Warning: Compiler emitted unknown "
1837 "config register: 0x%x\n", reg);
1838 break;
1839 }
1840 }
1841
1842 /* copy new shader */
1843 r600_resource_reference(&shader->bo, NULL);
1844 shader->bo = r600_resource_create_custom(rctx->b.b.screen, PIPE_USAGE_IMMUTABLE,
1845 binary.code_size);
1846 if (shader->bo == NULL) {
1847 return -ENOMEM;
1848 }
1849
1850 ptr = (uint32_t*)rctx->b.ws->buffer_map(shader->bo->cs_buf, rctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
1851 if (0 /*R600_BIG_ENDIAN*/) {
1852 for (i = 0; i < binary.code_size / 4; ++i) {
1853 ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
1854 }
1855 } else {
1856 memcpy(ptr, binary.code, binary.code_size);
1857 }
1858 rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
1859
1860 free(binary.code);
1861 free(binary.config);
1862
1863 return 0;
1864 }
1865
1866 int si_pipe_shader_create(
1867 struct pipe_context *ctx,
1868 struct si_pipe_shader *shader)
1869 {
1870 struct r600_context *rctx = (struct r600_context*)ctx;
1871 struct si_pipe_shader_selector *sel = shader->selector;
1872 struct si_shader_context si_shader_ctx;
1873 struct tgsi_shader_info shader_info;
1874 struct lp_build_tgsi_context * bld_base;
1875 LLVMModuleRef mod;
1876 int r = 0;
1877 bool dump = r600_can_dump_shader(&rctx->screen->b, shader->selector->tokens);
1878
1879 assert(shader->shader.noutput == 0);
1880 assert(shader->shader.ninterp == 0);
1881 assert(shader->shader.ninput == 0);
1882
1883 memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
1884 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
1885 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
1886
1887 tgsi_scan_shader(sel->tokens, &shader_info);
1888
1889 shader->shader.uses_kill = shader_info.uses_kill;
1890 shader->shader.uses_instanceid = shader_info.uses_instanceid;
1891 bld_base->info = &shader_info;
1892 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
1893 bld_base->emit_epilogue = si_llvm_emit_epilogue;
1894
1895 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
1896 bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
1897 #if HAVE_LLVM >= 0x0304
1898 bld_base->op_actions[TGSI_OPCODE_TXD] = txd_action;
1899 #endif
1900 bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
1901 bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
1902 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
1903 bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;
1904
1905 #if HAVE_LLVM >= 0x0304
1906 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
1907 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
1908 #endif
1909
1910 si_shader_ctx.radeon_bld.load_input = declare_input;
1911 si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
1912 si_shader_ctx.tokens = sel->tokens;
1913 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
1914 si_shader_ctx.shader = shader;
1915 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
1916
1917 create_meta_data(&si_shader_ctx);
1918 create_function(&si_shader_ctx);
1919 preload_constants(&si_shader_ctx);
1920 preload_samplers(&si_shader_ctx);
1921 preload_streamout_buffers(&si_shader_ctx);
1922
1923 shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;
1924
1925 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
1926 * conversion fails. */
1927 if (dump) {
1928 tgsi_dump(sel->tokens, 0);
1929 si_dump_streamout(&sel->so);
1930 }
1931
1932 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
1933 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
1934 FREE(si_shader_ctx.constants);
1935 FREE(si_shader_ctx.resources);
1936 FREE(si_shader_ctx.samplers);
1937 return -EINVAL;
1938 }
1939
1940 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
1941
1942 mod = bld_base->base.gallivm->module;
1943 r = si_compile_llvm(rctx, shader, mod);
1944
1945 radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
1946 tgsi_parse_free(&si_shader_ctx.parse);
1947
1948 FREE(si_shader_ctx.constants);
1949 FREE(si_shader_ctx.resources);
1950 FREE(si_shader_ctx.samplers);
1951
1952 return r;
1953 }
1954
/**
 * Destroy a compiled shader.
 *
 * Drops this shader's reference to its code buffer object via
 * r600_resource_reference(..., NULL); presumably the buffer is freed once
 * the reference count reaches zero — see r600_resource_reference.
 *
 * NOTE(review): the pipe_context parameter is unused here; it appears to be
 * kept only to match the driver callback signature.
 */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	r600_resource_reference(&shader->bo, NULL);
}