radeonsi: pass alpha_ref value to PS in the user sgpr
mesa.git: src/gallium/drivers/radeonsi/radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_flow.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>

struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct tgsi_parse_context parse;
	struct tgsi_token *tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_instance_id;
	LLVMValueRef const_md;
	LLVMValueRef const_resource;
#if HAVE_LLVM >= 0x0304
	LLVMValueRef ddxy_lds;
#endif
	LLVMValueRef *constants;
	LLVMValueRef *resources;
	LLVMValueRef *samplers;
	LLVMValueRef so_buffers[4];
};

static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is the
 * type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct si_shader_context *si_shader_ctx,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	struct lp_build_context *base = &si_shader_ctx->radeon_bld.soa.bld_base.base;

	LLVMValueRef computed_ptr = LLVMBuildGEP(
		base->gallivm->builder, base_ptr, &offset, 1, "");

	LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
	LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
	return result;
}
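/* Note: const_md is the "const" metadata node built in create_meta_data()
 * below, and the hardcoded kind ID 1 here is assumed to be LLVM's !tbaa
 * metadata kind. Under that assumption these loads are tagged as reads of
 * constant memory, which lets LLVM freely CSE and sink the descriptor and
 * constant loads that the preload_* functions emit up front. */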

static LLVMValueRef get_instance_index_for_fetch(
	struct radeon_llvm_context *radeon_bld,
	unsigned divisor)
{
	struct si_shader_context *si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;

	LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
					   si_shader_ctx->param_instance_id);
	result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
			radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");

	if (divisor > 1)
		result = LLVMBuildUDiv(gallivm->builder, result,
				lp_build_const_int32(gallivm, divisor), "");

	return result;
}
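/* In other words, this computes (InstanceID + StartInstance) / divisor.
 * For example, with divisor == 2 and StartInstance == 0, instances
 * 0,1,2,3 fetch attribute elements 0,0,1,1 — the standard instanced-array
 * divisor semantic (divisor == 1 can skip the division entirely). */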

static void declare_input_vs(
	struct si_shader_context *si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;

	/* Load the T list */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		si_shader_ctx->shader->shader.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&si_shader_ctx->radeon_bld, divisor);
	} else {
		/* Load the buffer index, which is always stored in VGPR0
		 * for Vertex Shaders */
		buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					    si_shader_ctx->param_vertex_id);
	}

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}

static void declare_input_fs(
	struct si_shader_context *si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context *base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	LLVMValueRef interp_param;
	const char *intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *                      quad begins a new primitive. Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		if (si_shader_ctx->shader->key.ps.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->shader->key.ps.color_two_side) {
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) {
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			build_intrinsic(base->gallivm->builder, intr_name,
					input_type, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static void declare_system_value(
	struct radeon_llvm_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     si_shader_ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     si_shader_ctx->param_vertex_id);
		break;

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned idx;

	LLVMValueRef args[2];
	LLVMValueRef addr;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	idx = reg->Register.Index * 4 + swizzle;
	if (!reg->Register.Indirect)
		return bitcast(bld_base, type, si_shader_ctx->constants[idx]);

	args[0] = si_shader_ctx->const_resource;
	args[1] = lp_build_const_int32(base->gallivm, idx * 4);
	addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}
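/* Addressing note: llvm.SI.load.const takes a byte offset, so the dword
 * index (register index * 4 channels + swizzle) is scaled by 4 above; an
 * indirect address register selects whole vec4 constants, hence the extra
 * multiply by 16 (bytes per vec4). */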

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);

			si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
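/* For reference, the nine llvm.SI.export operands assembled above are:
 *   args[0]    channel enable mask (one bit per component)
 *   args[1]    whether EXEC is used as the valid mask
 *   args[2]    "done" flag marking the last export
 *   args[3]    export target (MRT, MRTZ, POS, PARAM)
 *   args[4]    COMPR flag for packed fp16 data
 *   args[5..8] the four exported values */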

static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
				SI_PARAM_ALPHA_REF);

		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->shader->key.ps.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     alpha_ref);
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}
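/* The alpha reference value now arrives in the SI_PARAM_ALPHA_REF user SGPR
 * (declared in create_function() below) rather than being baked into the
 * shader, so changing the reference no longer forces a recompile.
 * llvm.AMDGPU.kill discards the pixel when its argument is negative, hence
 * the +1.0/-1.0 select; llvm.AMDGPU.kilp kills unconditionally for the
 * PIPE_FUNC_NEVER case. */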

static void si_alpha_to_one(struct lp_build_tgsi_context *bld_base,
			    unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);

	/* set alpha to one */
	LLVMBuildStore(bld_base->base.gallivm->builder,
		       bld_base->base.one,
		       si_shader_ctx->radeon_bld.soa.outputs[index][3]);
}

static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct si_pipe_shader *shader = si_shader_ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef out_elts[4];
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);

	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
		out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
	}

	for (reg_index = 0; reg_index < 2; reg_index++) {
		LLVMValueRef *args = pos[2 + reg_index];

		if (!(shader->key.vs.ucps_enabled & (1 << reg_index)))
			continue;

		shader->shader.clip_dist_write |= 0xf << (4 * reg_index);

		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				args[0] = const_resource;
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = build_intrinsic(base->gallivm->builder,
							   "llvm.SI.load.const",
							   base->elem_type,
							   args, 2,
							   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
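/* Each enabled clip-distance export thus computes
 *   dist[chan] = dot(clipvertex, user_clip_plane[reg_index * 4 + chan])
 * with the plane coefficients read from the second constant-buffer
 * descriptor (uint->one above), and the four distances go out through the
 * POS+2 / POS+3 export targets. */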

static void si_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	if (so->num_outputs)
		fprintf(stderr, "STREAMOUT\n");

	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, "  %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
			i, so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "");
	}
}
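/* Illustrative output for a single four-component varying written to
 * buffer 0 from output register 1:
 *   STREAMOUT
 *     0: BUF0[0..3] <- OUT[1].xyzw */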

/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4). */
static void build_tbuffer_store(struct si_shader_context *shader,
				LLVMValueRef rsrc,
				LLVMValueRef vdata,
				unsigned num_channels,
				LLVMValueRef vaddr,
				LLVMValueRef soffset,
				unsigned inst_offset,
				unsigned dfmt,
				unsigned nfmt,
				unsigned offen,
				unsigned idxen,
				unsigned glc,
				unsigned slc,
				unsigned tfe)
{
	struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(i32, num_channels, 0),
		vaddr,
		soffset,
		LLVMConstInt(i32, inst_offset, 0),
		LLVMConstInt(i32, dfmt, 0),
		LLVMConstInt(i32, nfmt, 0),
		LLVMConstInt(i32, offen, 0),
		LLVMConstInt(i32, idxen, 0),
		LLVMConstInt(i32, glc, 0),
		LLVMConstInt(i32, slc, 0),
		LLVMConstInt(i32, tfe, 0)
	};

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	lp_build_intrinsic(gallivm->builder, name,
			   LLVMVoidTypeInContext(gallivm->context),
			   args, Elements(args));
}
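/* Illustration (hypothetical caller): a single-dword store at vaddr plus an
 * 8-byte instruction offset, with the same offen/glc/slc flags
 * build_streamout_store() uses below, would be
 *   build_tbuffer_store(ctx, rsrc, value, 1, voffset, soffset, 8,
 *                       V_008F0C_BUF_DATA_FORMAT_32,
 *                       V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
 * which resolves to the "llvm.SI.tbuffer.store.i32" overload. */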

static void build_streamout_store(struct si_shader_context *shader,
				  LLVMValueRef rsrc,
				  LLVMValueRef vdata,
				  unsigned num_channels,
				  LLVMValueRef vaddr,
				  LLVMValueRef soffset,
				  unsigned inst_offset)
{
	static unsigned dfmt[] = {
		V_008F0C_BUF_DATA_FORMAT_32,
		V_008F0C_BUF_DATA_FORMAT_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32_32
	};
	assert(num_channels >= 1 && num_channels <= 4);

	build_tbuffer_store(shader, rsrc, vdata, num_channels, vaddr, soffset,
			    inst_offset, dfmt[num_channels-1],
			    V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
}

/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers. */
static void si_llvm_emit_streamout(struct si_shader_context *shader)
{
	struct pipe_stream_output_info *so = &shader->shader->selector->so;
	struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);

	LLVMValueRef so_param =
		LLVMGetParam(shader->radeon_bld.main_fn,
			     shader->param_streamout_config);

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		LLVMBuildAnd(builder,
			     LLVMBuildLShr(builder, so_param,
					   LLVMConstInt(i32, 16, 0), ""),
			     LLVMConstInt(i32, 127, 0), "");

	LLVMValueRef tid = build_intrinsic(builder, "llvm.SI.tid", i32,
					   NULL, 0, LLVMReadNoneAttribute);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(shader->radeon_bld.main_fn,
				     shader->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef so_offset = LLVMGetParam(shader->radeon_bld.main_fn,
							      shader->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS] = shader->radeon_bld.soa.outputs;

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			LLVMValueRef out[4];

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildLoad(builder, outputs[reg][start+j], "");
				out[j] = LLVMBuildBitCast(builder, out[j], i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(i32, j, 0), "");
				}
				break;
			}

			build_streamout_store(shader, shader->so_buffers[buf_idx],
					      vdata, num_comps,
					      so_write_offset[buf_idx],
					      LLVMConstInt(i32, 0, 0),
					      so->output[i].dst_offset*4);
		}
	}
	lp_build_endif(&if_ctx);
}

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	LLVMValueRef pos_args[4][9] = { { 0 } };
	unsigned semantic_name;
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;
	int i;

	if (si_shader_ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(si_shader_ctx);
	}

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			assert(i < Elements(shader->input));
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			assert(i < Elements(shader->output));
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		semantic_name = d->Semantic.Name;
handle_semantic:
		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (semantic_name) {
			case TGSI_SEMANTIC_PSIZE:
				shader->vs_out_misc_write = 1;
				shader->vs_out_point_size = 1;
				target = V_008DFC_SQ_EXP_POS + 1;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (si_shader_ctx->shader->key.ps.alpha_to_one) {
						si_alpha_to_one(bld_base, index);
					}
					if (color_count == 0 &&
					    si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_CLIPDIST:
				if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
				      (1 << d->Semantic.Index)))
					continue;
				shader->clip_dist_write |=
					d->Declaration.UsageMask << (d->Semantic.Index << 2);
				target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				si_llvm_emit_clipvertex(bld_base, pos_args, index);
				continue;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					semantic_name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX &&
			    target >= V_008DFC_SQ_EXP_POS &&
			    target <= (V_008DFC_SQ_EXP_POS + 3)) {
				memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
				       args, sizeof(args));
			} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT &&
				   semantic_name == TGSI_SEMANTIC_COLOR) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}

		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			/* Only setting the stencil component bit (0x2) here
			 * breaks some stencil piglit tests
			 */
			mask |= 0x3;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		unsigned pos_idx = 0;

		for (i = 0; i < 4; i++)
			if (pos_args[i][0])
				shader->nr_pos_exports++;

		for (i = 0; i < 4; i++) {
			if (!pos_args[i][0])
				continue;

			/* Specify the target we are exporting */
			pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

			if (pos_idx == shader->nr_pos_exports)
				/* Specify that this is the last export */
				pos_args[i][2] = uint->one;

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   pos_args[i], 9);
		}
	} else {
		if (!last_args[0]) {
			/* Specify which components to enable */
			last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

			/* Set COMPR flag to zero to export data as 32-bit */
			last_args[4] = uint->zero;

			/* dummy bits */
			last_args[5] = uint->zero;
			last_args[6] = uint->zero;
			last_args[7] = uint->zero;
			last_args[8] = uint->zero;

			si_shader_ctx->shader->spi_shader_col_format |=
				V_028714_SPI_SHADER_32_ABGR;
			si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
		}

		/* Specify whether the EXEC mask represents the valid mask */
		last_args[1] = uint->one;

		if (shader->fs_write_all && shader->nr_cbufs > 1) {
			int i;

			/* Specify that this is not yet the last export */
			last_args[2] = lp_build_const_int32(base->gallivm, 0);

			for (i = 1; i < shader->nr_cbufs; i++) {
				/* Specify the target we are exporting */
				last_args[3] = lp_build_const_int32(base->gallivm,
								    V_008DFC_SQ_EXP_MRT + i);

				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   last_args, 9);

				si_shader_ctx->shader->spi_shader_col_format |=
					si_shader_ctx->shader->spi_shader_col_format << 4;
				si_shader_ctx->shader->cb_shader_mask |=
					si_shader_ctx->shader->cb_shader_mask << 4;
			}

			last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
		}

		/* Specify that this is the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 1);

		lp_build_intrinsic(base->gallivm->builder,
				   "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   last_args, 9);
	}
	/* XXX: Look up what this function does */
	/* ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}

static const struct lp_build_tgsi_action txf_action;

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data);

static void tex_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src, sampler_index;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;
	unsigned chan;

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		assert(ref_pos >= 0);
		address[count++] = coords[ref_pos];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		for (chan = 0; chan < 2; chan++) {
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, chan);
			if (num_coords > 1)
				address[count++] = lp_build_emit_fetch(bld_base, inst, 2, chan);
		}
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;

		memcpy(txf_address, address, sizeof(txf_address));

		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Pad to a power-of-two size. */
		while (txf_count < util_next_power_of_two(txf_count))
			txf_address[txf_count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

		/* Read FMASK using TXF. */
		txf_emit_data.chan = 0;
		txf_emit_data.dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context), 4);
		txf_emit_data.args[0] = lp_build_gather_values(gallivm, txf_address, txf_count);
		txf_emit_data.args[1] = si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index];
		txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm,
			target == TGSI_TEXTURE_2D_MSAA ? TGSI_TEXTURE_2D : TGSI_TEXTURE_2D_ARRAY);
		txf_emit_data.arg_count = 3;

		build_tex_intrinsic(&txf_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(uint_bld->elem_type, 4, 0);
		LLVMValueRef F = LLVMConstInt(uint_bld->elem_type, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid).
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder,
					 si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index],
					 LLVMVectorType(uint_bld->elem_type, 8), "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[sampler_index];

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}

		emit_data->dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 3;
	} else {
		/* Sampler */
		emit_data->args[2] = si_shader_ctx->samplers[sampler_index];

		emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 4;
	}

	/* Dimensions */
	emit_data->args[emit_data->arg_count - 1] =
		lp_build_const_int32(bld_base->base.gallivm, target);

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct lp_build_context *base = &bld_base->base;
	char intr_name[23];

	sprintf(intr_name, "%sv%ui32", action->intr_name,
		LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));

	emit_data->output[emit_data->chan] = build_intrinsic(
		base->gallivm->builder, intr_name, emit_data->dst_type,
		emit_data->args, emit_data->arg_count,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
}
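/* The suffix encodes the address vector type, so a TEX with a 4-dword
 * address and the tex_action below yields "llvm.SI.sample.v4i32". */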

static void txq_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;

	/* Mip level */
	emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];

	/* Dimensions */
	emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
						  inst->Texture.Texture);

	emit_data->arg_count = 3;

	emit_data->dst_type = LLVMVectorType(
		LLVMInt32TypeInContext(bld_base->base.gallivm->context),
		4);
}

#if HAVE_LLVM >= 0x0304

static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMTypeRef i32;
	unsigned swizzle[4];
	unsigned c;

	i32 = LLVMInt32TypeInContext(gallivm->context);

	indices[0] = bld_base->uint_bld.zero;
	indices[1] = build_intrinsic(gallivm->builder, "llvm.SI.tid", i32,
				     NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, 0xfffffffc), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm,
						       opcode == TGSI_OPCODE_DDX ? 1 : 2),
				  "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, base->elem_type, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, base->elem_type, "");

		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}

#endif /* HAVE_LLVM >= 0x0304 */
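/* DDX/DDY work by bouncing values through the ddxy_lds scratch array: each
 * thread stores its value at its own tid, then reads back the value of the
 * first lane of its 2x2 quad (tid & ~3) and of the neighbour one column
 * (DDX, +1) or one row (DDY, +2) over; the derivative is their difference. */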

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

#if HAVE_LLVM >= 0x0304
static const struct lp_build_tgsi_action txd_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampled."
};
#endif

static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.SI.resinfo"
};

static void create_meta_data(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef args[3];

	args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
	args[1] = 0;
	args[2] = lp_build_const_int32(gallivm, 1);

	si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
}

static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
	unsigned i, last_sgpr, num_params;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
	params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
	params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);

	switch (si_shader_ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
		params[SI_PARAM_START_INSTANCE] = i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* The locations of the other parameters are assigned dynamically. */

		/* Streamout SGPRs. */
		if (si_shader_ctx->shader->selector->so.num_outputs) {
			params[si_shader_ctx->param_streamout_config = num_params++] = i32;
			params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
		}
		/* A streamout buffer offset is loaded if the stride is non-zero. */
		for (i = 0; i < 4; i++) {
			if (!si_shader_ctx->shader->selector->so.stride[i])
				continue;

			params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[si_shader_ctx->param_vertex_id = num_params++] = i32;
		params[num_params++] = i32; /* unused */
		params[num_params++] = i32; /* unused */
		params[si_shader_ctx->param_instance_id = num_params++] = i32;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = f32;
		params[SI_PARAM_PRIM_MASK] = i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		num_params = SI_PARAM_POS_FIXED_PT+1;
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));
	radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, num_params);
	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}

#if HAVE_LLVM >= 0x0304
	if (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	    bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0)
		si_shader_ctx->ddxy_lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);
#endif
}
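/* Parameters up to last_sgpr get the "inreg" attribute and are passed in
 * scalar registers preloaded before the wave starts; everything after them
 * arrives in VGPRs. For fragment shaders this is where SI_PARAM_ALPHA_REF
 * becomes a user SGPR, so the state tracker can feed the alpha-test
 * reference value to si_alpha_test() without recompiling the shader. */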

static void preload_constants(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info *info = bld_base->info;

	unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;

	LLVMValueRef ptr;

	if (num_const == 0)
		return;

	/* Allocate space for the constant values */
	si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));

	/* Load the resource descriptor */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);

	/* Load the constants, we rely on the code sinking to do the rest */
	for (i = 0; i < num_const * 4; ++i) {
		LLVMValueRef args[2] = {
			si_shader_ctx->const_resource,
			lp_build_const_int32(gallivm, i * 4)
		};
		si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
			bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
1697
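/*
 * Pre-load the resource and sampler descriptors for every sampler declared
 * by the shader; MSAA samplers additionally get an FMASK descriptor loaded
 * at FMASK_TEX_OFFSET + i.
 */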
static void preload_samplers(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info * info = bld_base->info;

	unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;

	LLVMValueRef res_ptr, samp_ptr;
	LLVMValueRef offset;

	if (num_samplers == 0)
		return;

	/* Allocate space for the values */
	si_shader_ctx->resources = CALLOC(NUM_SAMPLER_VIEWS, sizeof(LLVMValueRef));
	si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));

	res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
	samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);

	/* Load the resources and samplers; we rely on code sinking to do the rest */
	for (i = 0; i < num_samplers; ++i) {
		/* Resource */
		offset = lp_build_const_int32(gallivm, i);
		si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);

		/* Sampler */
		offset = lp_build_const_int32(gallivm, i);
		si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);

		/* FMASK resource */
		if (info->is_msaa_sampler[i]) {
			offset = lp_build_const_int32(gallivm, FMASK_TEX_OFFSET + i);
			si_shader_ctx->resources[FMASK_TEX_OFFSET + i] =
				build_indexed_load(si_shader_ctx, res_ptr, offset);
		}
	}
}

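/*
 * Pre-load the descriptors of the streamout buffers that are actually in
 * use, i.e. those with a non-zero stride.
 */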
static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	unsigned i;

	if (!si_shader_ctx->shader->selector->so.num_outputs)
		return;

	LLVMValueRef buf_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					    SI_PARAM_SO_BUFFER);

	/* Load the resources; we rely on code sinking to do the rest */
	for (i = 0; i < 4; ++i) {
		if (si_shader_ctx->shader->selector->so.stride[i]) {
			LLVMValueRef offset = lp_build_const_int32(gallivm, i);

			si_shader_ctx->so_buffers[i] = build_indexed_load(si_shader_ctx, buf_ptr, offset);
		}
	}
}

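/*
 * Compile the LLVM module to GCN machine code, parse the config registers
 * emitted by the compiler to fill in the SGPR/VGPR counts and LDS size,
 * and upload the machine code to a buffer object.
 */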
int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
		    LLVMModuleRef mod)
{
	unsigned i;
	uint32_t *ptr;
	struct radeon_llvm_binary binary;
	bool dump = r600_can_dump_shader(&rctx->screen->b,
			shader->selector ? shader->selector->tokens : NULL);
	memset(&binary, 0, sizeof(binary));
	radeon_llvm_compile(mod, &binary,
		r600_get_llvm_processor_name(rctx->screen->b.family), dump);
	if (dump && !binary.disassembled) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < binary.code_size; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
				binary.code[i + 2], binary.code[i + 1],
				binary.code[i]);
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */
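	/* The config section is a stream of little-endian
	 * (register, value) dword pairs. */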
	for (i = 0; i < binary.config_size; i += 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B848_COMPUTE_PGM_RSRC1:
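			/* The RSRC1 registers encode register usage in
			 * allocation granules of 8 SGPRs and 4 VGPRs,
			 * biased by one. */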
			shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
			shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			shader->lds_size = G_00B02C_EXTRA_LDS_SIZE(value);
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			shader->lds_size = G_00B84C_LDS_SIZE(value);
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			shader->spi_ps_input_ena = value;
			break;
		default:
			fprintf(stderr, "Warning: Compiler emitted unknown "
				"config register: 0x%x\n", reg);
			break;
		}
	}

	/* copy new shader */
	r600_resource_reference(&shader->bo, NULL);
	shader->bo = r600_resource_create_custom(rctx->b.b.screen, PIPE_USAGE_IMMUTABLE,
						 binary.code_size);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->b.ws->buffer_map(shader->bo->cs_buf, rctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
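	/* The byte-swapping path for big-endian hosts is currently disabled. */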
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < binary.code_size / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
		}
	} else {
		memcpy(ptr, binary.code, binary.code_size);
	}
	rctx->b.ws->buffer_unmap(shader->bo->cs_buf);

	free(binary.code);
	free(binary.config);

	return 0;
}

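/*
 * Top-level compilation entry point: scan the TGSI tokens, set up the
 * opcode actions and input callbacks, build the LLVM function, pre-load
 * the descriptors, translate TGSI to LLVM IR and hand the resulting
 * module to si_compile_llvm().
 */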
int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	int r = 0;
	bool dump = r600_can_dump_shader(&rctx->screen->b, shader->selector->tokens);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);

	shader->shader.uses_kill = shader_info.uses_kill;
	shader->shader.uses_instanceid = shader_info.uses_instanceid;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
#if HAVE_LLVM >= 0x0304
	bld_base->op_actions[TGSI_OPCODE_TXD] = txd_action;
#endif
	bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;

#if HAVE_LLVM >= 0x0304
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
#endif

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;

	create_meta_data(&si_shader_ctx);
	create_function(&si_shader_ctx);
	preload_constants(&si_shader_ctx);
	preload_samplers(&si_shader_ctx);
	preload_streamout_buffers(&si_shader_ctx);

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		FREE(si_shader_ctx.constants);
		FREE(si_shader_ctx.resources);
		FREE(si_shader_ctx.samplers);
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	r = si_compile_llvm(rctx, shader, mod);

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	FREE(si_shader_ctx.constants);
	FREE(si_shader_ctx.resources);
	FREE(si_shader_ctx.samplers);

	return r;
}

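/* Release the buffer object holding the shader's machine code. */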
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	r600_resource_reference(&shader->bo, NULL);
}