radeonsi: Handle TGSI_OPCODE_DDX/Y using local memory
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 /*
3 * Copyright 2012 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Tom Stellard <thomas.stellard@amd.com>
26 * Michel Dänzer <michel.daenzer@amd.com>
27 * Christian König <christian.koenig@amd.com>
28 */
29
30 #include "gallivm/lp_bld_tgsi_action.h"
31 #include "gallivm/lp_bld_const.h"
32 #include "gallivm/lp_bld_gather.h"
33 #include "gallivm/lp_bld_intr.h"
34 #include "gallivm/lp_bld_logic.h"
35 #include "gallivm/lp_bld_tgsi.h"
36 #include "gallivm/lp_bld_arit.h"
37 #include "radeon_llvm.h"
38 #include "radeon_llvm_emit.h"
39 #include "util/u_memory.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi/tgsi_dump.h"
45
46 #include "radeonsi_pipe.h"
47 #include "radeonsi_shader.h"
48 #include "si_state.h"
49 #include "sid.h"
50
51 #include <assert.h>
52 #include <errno.h>
53 #include <stdio.h>
54
/* Per-compilation state for translating one TGSI shader to SI LLVM IR.
 *
 * radeon_bld must remain the first member: lp_build_tgsi_context pointers
 * handed to the TGSI callbacks are downcast to this struct (see
 * si_shader_context() below).
 */
struct si_shader_context
{
        struct radeon_llvm_context radeon_bld; /* must be first (enables downcast) */
        struct tgsi_parse_context parse;
        struct tgsi_token * tokens;
        struct si_pipe_shader *shader;
        unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
        LLVMValueRef const_md;        /* metadata node attached to constant loads (kind 1) */
        LLVMValueRef const_resource;  /* descriptor used by llvm.SI.load.const */
#if HAVE_LLVM >= 0x0304
        LLVMValueRef ddxy_lds;        /* LDS scratch for DDX/DDY — TODO confirm, per commit title */
#endif
        LLVMValueRef *constants;      /* pre-fetched constants, indexed by reg*4 + chan */
        LLVMValueRef *resources;      /* texture resource descriptors, by sampler index */
        LLVMValueRef *samplers;       /* sampler state descriptors, by sampler index */
};
71
/* Recover the enclosing si_shader_context from a TGSI build context.
 * Valid because radeon_bld is the first member of si_shader_context,
 * so both pointers share the same address. */
static struct si_shader_context * si_shader_context(
        struct lp_build_tgsi_context * bld_base)
{
        void *ctx = bld_base;
        return (struct si_shader_context *)ctx;
}
77
78
/* Base indices and offsets used to select interpolation parameters. */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4 /* NOTE(review): misspelled ("OFSET"); kept — may be referenced elsewhere */

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address-space numbers used by the SI backend. */
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8
90
91 /**
92 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
93 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is
 * the type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
98 *
99 * When LLVM lowers the load instruction, it will convert the element offset
100 * into a dword offset automatically.
101 *
102 */
103 static LLVMValueRef build_indexed_load(
104 struct si_shader_context * si_shader_ctx,
105 LLVMValueRef base_ptr,
106 LLVMValueRef offset)
107 {
108 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
109
110 LLVMValueRef computed_ptr = LLVMBuildGEP(
111 base->gallivm->builder, base_ptr, &offset, 1, "");
112
113 LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
114 LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
115 return result;
116 }
117
118 static LLVMValueRef get_instance_index(
119 struct radeon_llvm_context * radeon_bld,
120 unsigned divisor)
121 {
122 struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
123
124 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_INSTANCE_ID);
125 result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
126 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
127
128 if (divisor > 1)
129 result = LLVMBuildUDiv(gallivm->builder, result,
130 lp_build_const_int32(gallivm, divisor), "");
131
132 return result;
133 }
134
135 static void declare_input_vs(
136 struct si_shader_context * si_shader_ctx,
137 unsigned input_index,
138 const struct tgsi_full_declaration *decl)
139 {
140 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
141 unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];
142
143 unsigned chan;
144
145 LLVMValueRef t_list_ptr;
146 LLVMValueRef t_offset;
147 LLVMValueRef t_list;
148 LLVMValueRef attribute_offset;
149 LLVMValueRef buffer_index;
150 LLVMValueRef args[3];
151 LLVMTypeRef vec4_type;
152 LLVMValueRef input;
153
154 /* Load the T list */
155 t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);
156
157 t_offset = lp_build_const_int32(base->gallivm, input_index);
158
159 t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);
160
161 /* Build the attribute offset */
162 attribute_offset = lp_build_const_int32(base->gallivm, 0);
163
164 if (divisor) {
165 /* Build index from instance ID, start instance and divisor */
166 si_shader_ctx->shader->shader.uses_instanceid = true;
167 buffer_index = get_instance_index(&si_shader_ctx->radeon_bld, divisor);
168 } else {
169 /* Load the buffer index, which is always stored in VGPR0
170 * for Vertex Shaders */
171 buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_ID);
172 }
173
174 vec4_type = LLVMVectorType(base->elem_type, 4);
175 args[0] = t_list;
176 args[1] = attribute_offset;
177 args[2] = buffer_index;
178 input = build_intrinsic(base->gallivm->builder,
179 "llvm.SI.vs.load.input", vec4_type, args, 3,
180 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
181
182 /* Break up the vec4 into individual components */
183 for (chan = 0; chan < 4; chan++) {
184 LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
185 /* XXX: Use a helper function for this. There is one in
186 * tgsi_llvm.c. */
187 si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
188 LLVMBuildExtractElement(base->gallivm->builder,
189 input, llvm_chan, "");
190 }
191 }
192
/* Declare a fragment-shader input.
 *
 * POSITION and FACE are synthesized from dedicated shader parameters;
 * every other input is interpolated one channel at a time with
 * llvm.SI.fs.interp (or llvm.SI.fs.constant for flat-shaded inputs).
 * Results land in radeon_bld.inputs at SoA indices.
 */
static void declare_input_fs(
        struct si_shader_context * si_shader_ctx,
        unsigned input_index,
        const struct tgsi_full_declaration *decl)
{
        struct si_shader *shader = &si_shader_ctx->shader->shader;
        struct lp_build_context * base =
                &si_shader_ctx->radeon_bld.soa.bld_base.base;
        struct gallivm_state * gallivm = base->gallivm;
        LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
        LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

        LLVMValueRef interp_param;
        const char * intr_name;

        /* This value is:
         * [15:0] NewPrimMask (Bit mask for each quad. It is set if the
         *                     quad begins a new primitive. Bit 0 always needs
         *                     to be unset)
         * [32:16] ParamOffset
         */
        LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
        LLVMValueRef attr_number;

        unsigned chan;

        if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
                /* gl_FragCoord: x/y/z/w arrive as separate float params. */
                for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
                        unsigned soa_index =
                                radeon_llvm_reg_index_soa(input_index, chan);
                        si_shader_ctx->radeon_bld.inputs[soa_index] =
                                LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

                        if (chan == 3)
                                /* RCP for fragcoord.w */
                                si_shader_ctx->radeon_bld.inputs[soa_index] =
                                        LLVMBuildFDiv(gallivm->builder,
                                                lp_build_const_float(gallivm, 1.0f),
                                                si_shader_ctx->radeon_bld.inputs[soa_index],
                                                "");
                }
                return;
        }

        if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
                /* gl_FrontFacing: map the FRONT_FACE param to 1.0 (front)
                 * or 0.0 (back); remaining channels get (0, 0, 1). */
                LLVMValueRef face, is_face_positive;

                face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

                is_face_positive = LLVMBuildFCmp(gallivm->builder,
                                                 LLVMRealUGT, face,
                                                 lp_build_const_float(gallivm, 0.0f),
                                                 "");

                si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
                        LLVMBuildSelect(gallivm->builder,
                                        is_face_positive,
                                        lp_build_const_float(gallivm, 1.0f),
                                        lp_build_const_float(gallivm, 0.0f),
                                        "");
                si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
                si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
                        lp_build_const_float(gallivm, 0.0f);
                si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
                        lp_build_const_float(gallivm, 1.0f);

                return;
        }

        /* Assign the next interpolated-parameter slot to this input. */
        shader->input[input_index].param_offset = shader->ninterp++;
        attr_number = lp_build_const_int32(gallivm,
                                           shader->input[input_index].param_offset);

        /* XXX: Handle all possible interpolation modes */
        switch (decl->Interp.Interpolate) {
        case TGSI_INTERPOLATE_COLOR:
                /* Flat-shaded colors use no interp param at all. */
                if (si_shader_ctx->shader->key.ps.flatshade) {
                        interp_param = 0;
                } else {
                        if (decl->Interp.Centroid)
                                interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
                        else
                                interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
                }
                break;
        case TGSI_INTERPOLATE_CONSTANT:
                interp_param = 0;
                break;
        case TGSI_INTERPOLATE_LINEAR:
                if (decl->Interp.Centroid)
                        interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
                else
                        interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
                break;
        case TGSI_INTERPOLATE_PERSPECTIVE:
                if (decl->Interp.Centroid)
                        interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
                else
                        interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
                break;
        default:
                fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
                return;
        }

        /* No interp param means a constant (flat) fetch. */
        intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

        /* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
        if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
            si_shader_ctx->shader->key.ps.color_two_side) {
                /* Two-sided lighting: interpolate both the front attribute
                 * and the back attribute (allocated at param_offset + 1)
                 * and select per pixel based on facing. */
                LLVMValueRef args[4];
                LLVMValueRef face, is_face_positive;
                LLVMValueRef back_attr_number =
                        lp_build_const_int32(gallivm,
                                             shader->input[input_index].param_offset + 1);

                face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

                is_face_positive = LLVMBuildFCmp(gallivm->builder,
                                                 LLVMRealUGT, face,
                                                 lp_build_const_float(gallivm, 0.0f),
                                                 "");

                args[2] = params;
                args[3] = interp_param;
                for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
                        LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
                        unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
                        LLVMValueRef front, back;

                        args[0] = llvm_chan;
                        args[1] = attr_number;
                        front = build_intrinsic(base->gallivm->builder, intr_name,
                                                input_type, args, args[3] ? 4 : 3,
                                                LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

                        args[1] = back_attr_number;
                        back = build_intrinsic(base->gallivm->builder, intr_name,
                                               input_type, args, args[3] ? 4 : 3,
                                               LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

                        si_shader_ctx->radeon_bld.inputs[soa_index] =
                                LLVMBuildSelect(gallivm->builder,
                                                is_face_positive,
                                                front,
                                                back,
                                                "");
                }

                /* The back color consumed an extra interpolated parameter. */
                shader->ninterp++;
        } else {
                for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
                        LLVMValueRef args[4];
                        LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
                        unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
                        args[0] = llvm_chan;
                        args[1] = attr_number;
                        args[2] = params;
                        args[3] = interp_param;
                        si_shader_ctx->radeon_bld.inputs[soa_index] =
                                build_intrinsic(base->gallivm->builder, intr_name,
                                                input_type, args, args[3] ? 4 : 3,
                                                LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
                }
        }
}
360
361 static void declare_input(
362 struct radeon_llvm_context * radeon_bld,
363 unsigned input_index,
364 const struct tgsi_full_declaration *decl)
365 {
366 struct si_shader_context * si_shader_ctx =
367 si_shader_context(&radeon_bld->soa.bld_base);
368 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
369 declare_input_vs(si_shader_ctx, input_index, decl);
370 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
371 declare_input_fs(si_shader_ctx, input_index, decl);
372 } else {
373 fprintf(stderr, "Warning: Unsupported shader type,\n");
374 }
375 }
376
377 static void declare_system_value(
378 struct radeon_llvm_context * radeon_bld,
379 unsigned index,
380 const struct tgsi_full_declaration *decl)
381 {
382
383 LLVMValueRef value = 0;
384
385 switch (decl->Semantic.Name) {
386 case TGSI_SEMANTIC_INSTANCEID:
387 value = get_instance_index(radeon_bld, 1);
388 break;
389
390 case TGSI_SEMANTIC_VERTEXID:
391 value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_VERTEX_ID);
392 break;
393
394 default:
395 assert(!"unknown system value");
396 return;
397 }
398
399 radeon_bld->system_values[index] = value;
400 }
401
/* TGSI CONST-file fetch callback.
 *
 * Directly-indexed constants come from the pre-fetched
 * si_shader_ctx->constants array (index = reg * 4 + swizzle); indirectly
 * addressed constants are read through the llvm.SI.load.const intrinsic
 * with a byte offset built from the address register.
 *
 * swizzle selects one channel, or LP_CHAN_ALL to gather all four.
 */
static LLVMValueRef fetch_constant(
        struct lp_build_tgsi_context * bld_base,
        const struct tgsi_full_src_register *reg,
        enum tgsi_opcode_type type,
        unsigned swizzle)
{
        struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
        struct lp_build_context * base = &bld_base->base;
        const struct tgsi_ind_register *ireg = &reg->Indirect;
        unsigned idx;

        LLVMValueRef args[2];
        LLVMValueRef addr;
        LLVMValueRef result;

        if (swizzle == LP_CHAN_ALL) {
                /* Recurse once per channel and gather into a vec4. */
                unsigned chan;
                LLVMValueRef values[4];
                for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
                        values[chan] = fetch_constant(bld_base, reg, type, chan);

                return lp_build_gather_values(bld_base->base.gallivm, values, 4);
        }

        idx = reg->Register.Index * 4 + swizzle;
        if (!reg->Register.Indirect)
                return bitcast(bld_base, type, si_shader_ctx->constants[idx]);

        /* Indirect access: byte offset = addr * 16 + idx * 4. */
        args[0] = si_shader_ctx->const_resource;
        args[1] = lp_build_const_int32(base->gallivm, idx * 4);
        addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
        addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
        addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
        args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

        result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
                                 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

        return bitcast(bld_base, type, result);
}
442
443 /* Initialize arguments for the shader export intrinsic */
/* Initialize arguments for the shader export intrinsic.
 *
 * Fills the 9 arguments of llvm.SI.export:
 *   args[0] component write mask, args[1] valid-mask flag, args[2] "done"
 *   flag, args[3] export target, args[4] COMPR flag, args[5..8] the values.
 *
 * For fragment MRT targets marked 16bpc in the shader key, channel pairs
 * are packed to half floats with llvm.SI.packf16 and exported compressed;
 * the SPI color format and CB shader mask are updated accordingly.
 */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
                                     struct tgsi_full_declaration *d,
                                     unsigned index,
                                     unsigned target,
                                     LLVMValueRef *args)
{
        struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
        struct lp_build_context *uint =
                &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
        struct lp_build_context *base = &bld_base->base;
        unsigned compressed = 0;
        unsigned chan;

        if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
                int cbuf = target - V_008DFC_SQ_EXP_MRT;

                if (cbuf >= 0 && cbuf < 8) {
                        /* One export_16bpc bit per color buffer. */
                        compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

                        if (compressed)
                                si_shader_ctx->shader->spi_shader_col_format |=
                                        V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
                        else
                                si_shader_ctx->shader->spi_shader_col_format |=
                                        V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);

                        si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
                }
        }

        if (compressed) {
                /* Pixel shader needs to pack output values before export:
                 * (x,y) -> args[5], (z,w) -> args[6], each as two halfs. */
                for (chan = 0; chan < 2; chan++ ) {
                        LLVMValueRef *out_ptr =
                                si_shader_ctx->radeon_bld.soa.outputs[index];
                        args[0] = LLVMBuildLoad(base->gallivm->builder,
                                                out_ptr[2 * chan], "");
                        args[1] = LLVMBuildLoad(base->gallivm->builder,
                                                out_ptr[2 * chan + 1], "");
                        args[chan + 5] =
                                build_intrinsic(base->gallivm->builder,
                                                "llvm.SI.packf16",
                                                LLVMInt32TypeInContext(base->gallivm->context),
                                                args, 2,
                                                LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
                        /* Intrinsic returns i32; export wants float bits. */
                        args[chan + 7] = args[chan + 5] =
                                LLVMBuildBitCast(base->gallivm->builder,
                                                 args[chan + 5],
                                                 LLVMFloatTypeInContext(base->gallivm->context),
                                                 "");
                }

                /* Set COMPR flag */
                args[4] = uint->one;
        } else {
                for (chan = 0; chan < 4; chan++ ) {
                        LLVMValueRef out_ptr =
                                si_shader_ctx->radeon_bld.soa.outputs[index][chan];
                        /* +5 because the first output value will be
                         * the 6th argument to the intrinsic. */
                        args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
                                                       out_ptr, "");
                }

                /* Clear COMPR flag */
                args[4] = uint->zero;
        }

        /* XXX: This controls which components of the output
         * registers actually get exported. (e.g bit 0 means export
         * X component, bit 1 means export Y component, etc.) I'm
         * hard coding this to 0xf for now. In the future, we might
         * want to do something else. */
        args[0] = lp_build_const_int32(base->gallivm, 0xf);

        /* Specify whether the EXEC mask represents the valid mask */
        args[1] = uint->zero;

        /* Specify whether this is the last export */
        args[2] = uint->zero;

        /* Specify the target we are exporting */
        args[3] = lp_build_const_int32(base->gallivm, target);

        /* XXX: We probably need to keep track of the output
         * values, so we know what we are passing to the next
         * stage. */
}
532
533 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
534 unsigned index)
535 {
536 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
537 struct gallivm_state *gallivm = bld_base->base.gallivm;
538
539 if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
540 LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
541 LLVMValueRef alpha_pass =
542 lp_build_cmp(&bld_base->base,
543 si_shader_ctx->shader->key.ps.alpha_func,
544 LLVMBuildLoad(gallivm->builder, out_ptr, ""),
545 lp_build_const_float(gallivm, si_shader_ctx->shader->key.ps.alpha_ref));
546 LLVMValueRef arg =
547 lp_build_select(&bld_base->base,
548 alpha_pass,
549 lp_build_const_float(gallivm, 1.0f),
550 lp_build_const_float(gallivm, -1.0f));
551
552 build_intrinsic(gallivm->builder,
553 "llvm.AMDGPU.kill",
554 LLVMVoidTypeInContext(gallivm->context),
555 &arg, 1, 0);
556 } else {
557 build_intrinsic(gallivm->builder,
558 "llvm.AMDGPU.kilp",
559 LLVMVoidTypeInContext(gallivm->context),
560 NULL, 0, 0);
561 }
562 }
563
/* Emit clip-distance exports computed from a CLIPVERTEX output.
 *
 * Reads the 4-component clip vertex from outputs[index], dots it against
 * eight user clip-plane vectors fetched with llvm.SI.load.const from the
 * descriptor at const-list slot 1 (presumably where the driver stores the
 * clip planes — verify against state setup), and exports the resulting
 * distances as two position exports (POS+2 and POS+3, four values each).
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
                                    unsigned index)
{
        struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
        struct lp_build_context *base = &bld_base->base;
        struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
        LLVMValueRef args[9];
        unsigned reg_index;
        unsigned chan;
        unsigned const_chan;
        LLVMValueRef out_elts[4];
        LLVMValueRef base_elt;
        LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
        LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);

        /* Load the clip-vertex components once up front. */
        for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
                LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
                out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
        }

        for (reg_index = 0; reg_index < 2; reg_index ++) {
                args[5] =
                args[6] =
                args[7] =
                args[8] = lp_build_const_float(base->gallivm, 0.0f);

                /* Compute dot products of position and user clip plane vectors */
                for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
                        for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
                                args[0] = const_resource;
                                /* Byte offset of plane (reg_index*4 + chan),
                                 * component const_chan. */
                                args[1] = lp_build_const_int32(base->gallivm,
                                                               ((reg_index * 4 + chan) * 4 +
                                                                const_chan) * 4);
                                base_elt = build_intrinsic(base->gallivm->builder,
                                                           "llvm.SI.load.const",
                                                           base->elem_type,
                                                           args, 2,
                                                           LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
                                args[5 + chan] =
                                        lp_build_add(base, args[5 + chan],
                                                     lp_build_mul(base, base_elt,
                                                                  out_elts[const_chan]));
                        }
                }

                /* Export all four distances, not the "done" export. */
                args[0] = lp_build_const_int32(base->gallivm, 0xf);
                args[1] = uint->zero;
                args[2] = uint->zero;
                args[3] = lp_build_const_int32(base->gallivm,
                                               V_008DFC_SQ_EXP_POS + 2 + reg_index);
                args[4] = uint->zero;
                lp_build_intrinsic(base->gallivm->builder,
                                   "llvm.SI.export",
                                   LLVMVoidTypeInContext(base->gallivm->context),
                                   args, 9);
        }
}
621
/* XXX: This is partially implemented for VS only at this point. It is not complete */
/* Shader epilogue: re-walks the TGSI token stream to collect input/output
 * metadata and emits llvm.SI.export for every output.
 *
 * Exactly one export must carry the "done" bit, so the preferred candidate
 * (VS position / PS color) is buffered in last_args and emitted last.  PS
 * depth/stencil outputs are gathered into a single MRTZ export; if a PS
 * produced no exports at all, a dummy null export is synthesized.
 */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
        struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
        struct si_shader * shader = &si_shader_ctx->shader->shader;
        struct lp_build_context * base = &bld_base->base;
        struct lp_build_context * uint =
                &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
        struct tgsi_parse_context *parse = &si_shader_ctx->parse;
        LLVMValueRef args[9];
        LLVMValueRef last_args[9] = { 0 };
        unsigned semantic_name;
        unsigned color_count = 0;
        unsigned param_count = 0;
        int depth_index = -1, stencil_index = -1;

        while (!tgsi_parse_end_of_tokens(parse)) {
                struct tgsi_full_declaration *d =
                        &parse->FullToken.FullDeclaration;
                unsigned target;
                unsigned index;
                int i;

                tgsi_parse_token(parse);

                if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
                    parse->FullToken.FullProperty.Property.PropertyName ==
                    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
                        shader->fs_write_all = TRUE;

                if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
                        continue;

                switch (d->Declaration.File) {
                case TGSI_FILE_INPUT:
                        /* Inputs only contribute metadata, no exports. */
                        i = shader->ninput++;
                        assert(i < Elements(shader->input));
                        shader->input[i].name = d->Semantic.Name;
                        shader->input[i].sid = d->Semantic.Index;
                        shader->input[i].interpolate = d->Interp.Interpolate;
                        shader->input[i].centroid = d->Interp.Centroid;
                        continue;

                case TGSI_FILE_OUTPUT:
                        i = shader->noutput++;
                        assert(i < Elements(shader->output));
                        shader->output[i].name = d->Semantic.Name;
                        shader->output[i].sid = d->Semantic.Index;
                        shader->output[i].interpolate = d->Interp.Interpolate;
                        break;

                default:
                        continue;
                }

                semantic_name = d->Semantic.Name;
handle_semantic:
                for (index = d->Range.First; index <= d->Range.Last; index++) {
                        /* Select the correct target */
                        switch(semantic_name) {
                        case TGSI_SEMANTIC_PSIZE:
                                shader->vs_out_misc_write = 1;
                                shader->vs_out_point_size = 1;
                                target = V_008DFC_SQ_EXP_POS + 1;
                                break;
                        case TGSI_SEMANTIC_POSITION:
                                if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
                                        target = V_008DFC_SQ_EXP_POS;
                                        break;
                                } else {
                                        /* PS position output is depth;
                                         * exported later via MRTZ. */
                                        depth_index = index;
                                        continue;
                                }
                        case TGSI_SEMANTIC_STENCIL:
                                stencil_index = index;
                                continue;
                        case TGSI_SEMANTIC_COLOR:
                                /* NOTE: the BCOLOR case label below is
                                 * deliberately nested inside the VS branch,
                                 * so VS back colors share the param-export
                                 * path while PS colors take the else. */
                                if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
                        case TGSI_SEMANTIC_BCOLOR:
                                        target = V_008DFC_SQ_EXP_PARAM + param_count;
                                        shader->output[i].param_offset = param_count;
                                        param_count++;
                                } else {
                                        target = V_008DFC_SQ_EXP_MRT + color_count;
                                        /* Alpha test runs on the first color export only. */
                                        if (color_count == 0 &&
                                            si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
                                                si_alpha_test(bld_base, index);

                                        color_count++;
                                }
                                break;
                        case TGSI_SEMANTIC_CLIPDIST:
                                shader->clip_dist_write |=
                                        d->Declaration.UsageMask << (d->Semantic.Index << 2);
                                target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
                                break;
                        case TGSI_SEMANTIC_CLIPVERTEX:
                                si_llvm_emit_clipvertex(bld_base, index);
                                shader->clip_dist_write = 0xFF;
                                continue;
                        case TGSI_SEMANTIC_FOG:
                        case TGSI_SEMANTIC_GENERIC:
                                target = V_008DFC_SQ_EXP_PARAM + param_count;
                                shader->output[i].param_offset = param_count;
                                param_count++;
                                break;
                        default:
                                target = 0;
                                fprintf(stderr,
                                        "Warning: SI unhandled output type:%d\n",
                                        semantic_name);
                        }

                        si_llvm_init_export_args(bld_base, d, index, target, args);

                        /* VS position / PS color is the preferred "last"
                         * export: buffer it; flush any previous candidate. */
                        if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
                            (semantic_name == TGSI_SEMANTIC_POSITION) :
                            (semantic_name == TGSI_SEMANTIC_COLOR)) {
                                if (last_args[0]) {
                                        lp_build_intrinsic(base->gallivm->builder,
                                                           "llvm.SI.export",
                                                           LLVMVoidTypeInContext(base->gallivm->context),
                                                           last_args, 9);
                                }

                                memcpy(last_args, args, sizeof(args));
                        } else {
                                lp_build_intrinsic(base->gallivm->builder,
                                                   "llvm.SI.export",
                                                   LLVMVoidTypeInContext(base->gallivm->context),
                                                   args, 9);
                        }

                }

                /* CLIPDIST is exported both as a position and as a
                 * parameter for the next stage; loop once more. */
                if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
                        semantic_name = TGSI_SEMANTIC_GENERIC;
                        goto handle_semantic;
                }
        }

        if (depth_index >= 0 || stencil_index >= 0) {
                /* Combined depth/stencil export to the MRTZ target:
                 * depth in arg 5 (mask bit 0), stencil in arg 6 (bit 1). */
                LLVMValueRef out_ptr;
                unsigned mask = 0;

                /* Specify the target we are exporting */
                args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

                if (depth_index >= 0) {
                        out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
                        args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
                        mask |= 0x1;

                        if (stencil_index < 0) {
                                args[6] =
                                args[7] =
                                args[8] = args[5];
                        }
                }

                if (stencil_index >= 0) {
                        out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
                        args[7] =
                        args[8] =
                        args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
                        mask |= 0x2;

                        if (depth_index < 0)
                                args[5] = args[6];
                }

                /* Specify which components to enable */
                args[0] = lp_build_const_int32(base->gallivm, mask);

                args[1] =
                args[2] =
                args[4] = uint->zero;

                /* Keep the color export (if any) as the final one;
                 * otherwise MRTZ itself becomes the final export. */
                if (last_args[0])
                        lp_build_intrinsic(base->gallivm->builder,
                                           "llvm.SI.export",
                                           LLVMVoidTypeInContext(base->gallivm->context),
                                           args, 9);
                else
                        memcpy(last_args, args, sizeof(args));
        }

        if (!last_args[0]) {
                /* Fragment shader with no exports: the hardware still
                 * requires one, so emit a null MRT export. */
                assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

                /* Specify which components to enable */
                last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

                /* Specify the target we are exporting */
                last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

                /* Set COMPR flag to zero to export data as 32-bit */
                last_args[4] = uint->zero;

                /* dummy bits */
                last_args[5]= uint->zero;
                last_args[6]= uint->zero;
                last_args[7]= uint->zero;
                last_args[8]= uint->zero;

                si_shader_ctx->shader->spi_shader_col_format |=
                        V_028714_SPI_SHADER_32_ABGR;
                si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
        }

        /* Specify whether the EXEC mask represents the valid mask */
        last_args[1] = lp_build_const_int32(base->gallivm,
                                            si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

        if (shader->fs_write_all && shader->nr_cbufs > 1) {
                int i;

                /* Replicate color0 to every bound color buffer. */
                /* Specify that this is not yet the last export */
                last_args[2] = lp_build_const_int32(base->gallivm, 0);

                for (i = 1; i < shader->nr_cbufs; i++) {
                        /* Specify the target we are exporting */
                        last_args[3] = lp_build_const_int32(base->gallivm,
                                                            V_008DFC_SQ_EXP_MRT + i);

                        lp_build_intrinsic(base->gallivm->builder,
                                           "llvm.SI.export",
                                           LLVMVoidTypeInContext(base->gallivm->context),
                                           last_args, 9);

                        si_shader_ctx->shader->spi_shader_col_format |=
                                si_shader_ctx->shader->spi_shader_col_format << 4;
                        si_shader_ctx->shader->cb_shader_mask |=
                                si_shader_ctx->shader->cb_shader_mask << 4;
                }

                last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
        }

        /* Specify that this is the last export */
        last_args[2] = lp_build_const_int32(base->gallivm, 1);

        lp_build_intrinsic(base->gallivm->builder,
                           "llvm.SI.export",
                           LLVMVoidTypeInContext(base->gallivm->context),
                           last_args, 9);

        /* XXX: Look up what this function does */
/*              ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
872
/* Assemble the argument list for the texture-sample intrinsics.
 *
 * Packs the address components in hardware order — LOD bias (TXB), depth
 * compare reference, user derivatives (TXD), coordinates, array slice,
 * explicit LOD (TXL/TXF) — pads the vector to a power of two, and fills
 * emit_data with the address vector, resource/sampler descriptors, the
 * target dimension and the result type (v4i32 for TXF, v4f32 otherwise).
 */
static void tex_fetch_args(
        struct lp_build_tgsi_context * bld_base,
        struct lp_build_emit_data * emit_data)
{
        struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
        struct gallivm_state *gallivm = bld_base->base.gallivm;
        const struct tgsi_full_instruction * inst = emit_data->inst;
        unsigned opcode = inst->Instruction.Opcode;
        unsigned target = inst->Texture.Texture;
        unsigned sampler_src;
        LLVMValueRef coords[4];
        LLVMValueRef address[16];
        int ref_pos;
        unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
        unsigned count = 0;
        unsigned chan;

        /* Fetch and project texture coordinates */
        coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
        for (chan = 0; chan < 3; chan++ ) {
                coords[chan] = lp_build_emit_fetch(bld_base,
                                                   emit_data->inst, 0,
                                                   chan);
                /* TXP divides each coordinate by q (the w channel). */
                if (opcode == TGSI_OPCODE_TXP)
                        coords[chan] = lp_build_emit_llvm_binary(bld_base,
                                                                 TGSI_OPCODE_DIV,
                                                                 coords[chan],
                                                                 coords[3]);
        }

        if (opcode == TGSI_OPCODE_TXP)
                coords[3] = bld_base->base.one;

        /* Pack LOD bias value */
        if (opcode == TGSI_OPCODE_TXB)
                address[count++] = coords[3];

        if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
                radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

        /* Pack depth comparison value */
        switch (target) {
        case TGSI_TEXTURE_SHADOW1D:
        case TGSI_TEXTURE_SHADOW1D_ARRAY:
        case TGSI_TEXTURE_SHADOW2D:
        case TGSI_TEXTURE_SHADOWRECT:
        case TGSI_TEXTURE_SHADOWCUBE:
        case TGSI_TEXTURE_SHADOW2D_ARRAY:
                assert(ref_pos >= 0);
                address[count++] = coords[ref_pos];
                break;
        case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
                /* Cube-array shadow: ref value comes from the second source. */
                address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
        }

        /* Pack user derivatives */
        if (opcode == TGSI_OPCODE_TXD) {
                for (chan = 0; chan < 2; chan++) {
                        address[count++] = lp_build_emit_fetch(bld_base, inst, 1, chan);
                        if (num_coords > 1)
                                address[count++] = lp_build_emit_fetch(bld_base, inst, 2, chan);
                }
        }

        /* Pack texture coordinates */
        address[count++] = coords[0];
        if (num_coords > 1)
                address[count++] = coords[1];
        if (num_coords > 2)
                address[count++] = coords[2];

        /* Pack array slice */
        switch (target) {
        case TGSI_TEXTURE_1D_ARRAY:
                address[count++] = coords[1];
        }
        switch (target) {
        case TGSI_TEXTURE_2D_ARRAY:
        case TGSI_TEXTURE_2D_ARRAY_MSAA:
        case TGSI_TEXTURE_SHADOW2D_ARRAY:
                address[count++] = coords[2];
        }
        switch (target) {
        case TGSI_TEXTURE_CUBE_ARRAY:
        case TGSI_TEXTURE_SHADOW1D_ARRAY:
        case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
                address[count++] = coords[3];
        }

        /* Pack LOD */
        if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
                address[count++] = coords[3];

        if (count > 16) {
                assert(!"Cannot handle more than 16 texture address parameters");
                count = 16;
        }

        /* The intrinsic takes the address as an i32 vector. */
        for (chan = 0; chan < count; chan++ ) {
                address[chan] = LLVMBuildBitCast(gallivm->builder,
                                                 address[chan],
                                                 LLVMInt32TypeInContext(gallivm->context),
                                                 "");
        }

        sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;

        /* Resource */
        emit_data->args[1] = si_shader_ctx->resources[emit_data->inst->Src[sampler_src].Register.Index];

        if (opcode == TGSI_OPCODE_TXF) {
                /* add tex offsets */
                if (inst->Texture.NumOffsets) {
                        struct lp_build_context *uint_bld = &bld_base->uint_bld;
                        struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
                        const struct tgsi_texture_offset * off = inst->TexOffsets;

                        assert(inst->Texture.NumOffsets == 1);

                        address[0] =
                                lp_build_add(uint_bld, address[0],
                                             bld->immediates[off->Index][off->SwizzleX]);
                        if (num_coords > 1)
                                address[1] =
                                        lp_build_add(uint_bld, address[1],
                                                     bld->immediates[off->Index][off->SwizzleY]);
                        if (num_coords > 2)
                                address[2] =
                                        lp_build_add(uint_bld, address[2],
                                                     bld->immediates[off->Index][off->SwizzleZ]);
                }

                /* TXF returns raw texels: result is a v4i32, no sampler. */
                emit_data->dst_type = LLVMVectorType(
                        LLVMInt32TypeInContext(bld_base->base.gallivm->context),
                        4);

                emit_data->arg_count = 3;
        } else {
                /* Sampler */
                emit_data->args[2] = si_shader_ctx->samplers[emit_data->inst->Src[sampler_src].Register.Index];

                emit_data->dst_type = LLVMVectorType(
                        LLVMFloatTypeInContext(bld_base->base.gallivm->context),
                        4);

                emit_data->arg_count = 4;
        }

        /* Dimensions */
        emit_data->args[emit_data->arg_count - 1] =
                lp_build_const_int32(bld_base->base.gallivm, target);

        /* Pad to power of two vector */
        while (count < util_next_power_of_two(count))
                address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

        emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}
1031
1032 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1033 struct lp_build_tgsi_context * bld_base,
1034 struct lp_build_emit_data * emit_data)
1035 {
1036 struct lp_build_context * base = &bld_base->base;
1037 char intr_name[23];
1038
1039 sprintf(intr_name, "%sv%ui32", action->intr_name,
1040 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
1041
1042 emit_data->output[emit_data->chan] = build_intrinsic(
1043 base->gallivm->builder, intr_name, emit_data->dst_type,
1044 emit_data->args, emit_data->arg_count,
1045 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1046 }
1047
1048 static void txq_fetch_args(
1049 struct lp_build_tgsi_context * bld_base,
1050 struct lp_build_emit_data * emit_data)
1051 {
1052 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1053 const struct tgsi_full_instruction *inst = emit_data->inst;
1054
1055 /* Mip level */
1056 emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
1057
1058 /* Resource */
1059 emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];
1060
1061 /* Dimensions */
1062 emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
1063 inst->Texture.Texture);
1064
1065 emit_data->arg_count = 3;
1066
1067 emit_data->dst_type = LLVMVectorType(
1068 LLVMInt32TypeInContext(bld_base->base.gallivm->context),
1069 4);
1070 }
1071
1072 #if HAVE_LLVM >= 0x0304
1073
/* Emit TGSI_OPCODE_DDX / DDY using local (LDS) memory.
 *
 * Each lane writes its source value to its own LDS slot, then reads
 * back the values written by neighboring lanes of its 2x2 pixel quad
 * and subtracts them to approximate the screen-space derivative.
 *
 * NOTE(review): no barrier is emitted between the store and the loads;
 * this presumably relies on the lanes of a quad executing in lockstep
 * within a single wavefront — TODO confirm against the hardware docs.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMTypeRef i32;
	unsigned swizzle[4];
	unsigned c;

	i32 = LLVMInt32TypeInContext(gallivm->context);

	/* store_ptr = &ddxy_lds[tid]: this lane's private slot. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = build_intrinsic(gallivm->builder, "llvm.SI.tid", i32,
				     NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* load_ptr0 = &ddxy_lds[tid & ~3]: the first lane of this quad
	 * (quads occupy four consecutive lane slots). */
	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, 0xfffffffc), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	/* load_ptr1: the horizontal neighbor (+1) for DDX, or the
	 * vertical neighbor (+2) for DDY, relative to the quad base. */
	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm,
						       opcode == TGSI_OPCODE_DDX ? 1 : 2),
				  "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, si_shader_ctx->ddxy_lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		/* If an earlier channel used the same source swizzle,
		 * reuse its result instead of recomputing. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		/* i != c means the inner loop broke out on a match. */
		if (i != c)
			continue;

		/* Publish this lane's value for the quad to read. */
		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						i32, ""),
			       store_ptr);

		/* tl = value at the quad's top-left lane. */
		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, base->elem_type, "");

		/* trbl = top-right (DDX) or bottom-left (DDY) lane. */
		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, base->elem_type, "");

		/* Derivative = neighbor - base. */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
1141
1142 #endif /* HAVE_LLVM >= 0x0304 */
1143
/* TGSI texture opcode -> SI intrinsic action table.
 * All sampling opcodes share tex_fetch_args()/build_tex_intrinsic();
 * only the intrinsic name prefix differs.  build_tex_intrinsic()
 * appends the address-vector width suffix (e.g. "v4i32"). */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

/* TXB: sample with LOD bias. */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

#if HAVE_LLVM >= 0x0304
/* TXD: sample with explicit derivatives. */
static const struct lp_build_tgsi_action txd_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampled."
};
#endif

/* TXF: fetch a texel by integer coordinates (no filtering). */
static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};

/* TXL: sample with explicit LOD. */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

/* TXQ: texture size query; no vector suffix is appended. */
static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.SI.resinfo"
};
1181
1182 static void create_meta_data(struct si_shader_context *si_shader_ctx)
1183 {
1184 struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
1185 LLVMValueRef args[3];
1186
1187 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
1188 args[1] = 0;
1189 args[2] = lp_build_const_int32(gallivm, 1);
1190
1191 si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
1192 }
1193
/* Declare the LLVM entry point for the shader and describe its
 * parameter list (the SI_PARAM_* ABI slots), which differs between
 * vertex and pixel shaders.  Also allocates the LDS scratch array
 * used for DDX/DDY emulation when those opcodes are present. */
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMTypeRef params[20], f32, i8, i32, v2i32, v3i32;
	unsigned i;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	/* Descriptor pointers live in the constant address space. */
	params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
	params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
	params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		/* Vertex shader: 9 parameters total. */
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_SAMPLER];
		params[SI_PARAM_START_INSTANCE] = i32;
		params[SI_PARAM_VERTEX_ID] = i32;
		params[SI_PARAM_DUMMY_0] = i32;
		params[SI_PARAM_DUMMY_1] = i32;
		params[SI_PARAM_INSTANCE_ID] = i32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 9);

	} else {
		/* Pixel shader: 20 parameters (interpolants, position,
		 * face, ancillary data...). */
		params[SI_PARAM_PRIM_MASK] = i32;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 20);
	}

	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);
	/* The descriptor pointer parameters are passed in SGPRs. */
	for (i = SI_PARAM_CONST; i <= SI_PARAM_VERTEX_BUFFER; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		/* START_INSTANCE is uniform per draw, so SGPR too. */
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					      SI_PARAM_START_INSTANCE);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}

#if HAVE_LLVM >= 0x0304
	/* 64 dwords of LDS scratch (one slot per lane) for the
	 * DDX/DDY emulation in si_llvm_emit_ddxy(). */
	if (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	    bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0)
		si_shader_ctx->ddxy_lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);
#endif
}
1263
1264 static void preload_constants(struct si_shader_context *si_shader_ctx)
1265 {
1266 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1267 struct gallivm_state * gallivm = bld_base->base.gallivm;
1268 const struct tgsi_shader_info * info = bld_base->info;
1269
1270 unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;
1271
1272 LLVMValueRef ptr;
1273
1274 if (num_const == 0)
1275 return;
1276
1277 /* Allocate space for the constant values */
1278 si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));
1279
1280 /* Load the resource descriptor */
1281 ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
1282 si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);
1283
1284 /* Load the constants, we rely on the code sinking to do the rest */
1285 for (i = 0; i < num_const * 4; ++i) {
1286 LLVMValueRef args[2] = {
1287 si_shader_ctx->const_resource,
1288 lp_build_const_int32(gallivm, i * 4)
1289 };
1290 si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
1291 bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1292 }
1293 }
1294
1295 static void preload_samplers(struct si_shader_context *si_shader_ctx)
1296 {
1297 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1298 struct gallivm_state * gallivm = bld_base->base.gallivm;
1299 const struct tgsi_shader_info * info = bld_base->info;
1300
1301 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
1302
1303 LLVMValueRef res_ptr, samp_ptr;
1304 LLVMValueRef offset;
1305
1306 if (num_samplers == 0)
1307 return;
1308
1309 /* Allocate space for the values */
1310 si_shader_ctx->resources = CALLOC(num_samplers, sizeof(LLVMValueRef));
1311 si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));
1312
1313 res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
1314 samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
1315
1316 /* Load the resources and samplers, we rely on the code sinking to do the rest */
1317 for (i = 0; i < num_samplers; ++i) {
1318
1319 /* Resource */
1320 offset = lp_build_const_int32(gallivm, i);
1321 si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);
1322
1323 /* Sampler */
1324 offset = lp_build_const_int32(gallivm, i);
1325 si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);
1326 }
1327 }
1328
1329 int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
1330 LLVMModuleRef mod)
1331 {
1332 unsigned i;
1333 uint32_t *ptr;
1334 bool dump;
1335 struct radeon_llvm_binary binary;
1336
1337 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1338
1339 memset(&binary, 0, sizeof(binary));
1340 radeon_llvm_compile(mod, &binary,
1341 r600_get_llvm_processor_name(rctx->screen->family), dump);
1342 if (dump) {
1343 fprintf(stderr, "SI CODE:\n");
1344 for (i = 0; i < binary.code_size; i+=4 ) {
1345 fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
1346 binary.code[i + 2], binary.code[i + 1],
1347 binary.code[i]);
1348 }
1349 }
1350
1351 /* XXX: We may be able to emit some of these values directly rather than
1352 * extracting fields to be emitted later.
1353 */
1354 for (i = 0; i < binary.config_size; i+= 8) {
1355 unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
1356 unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
1357 switch (reg) {
1358 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
1359 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
1360 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
1361 case R_00B848_COMPUTE_PGM_RSRC1:
1362 shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
1363 shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
1364 break;
1365 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
1366 shader->lds_size = G_00B02C_EXTRA_LDS_SIZE(value);
1367 break;
1368 case R_00B84C_COMPUTE_PGM_RSRC2:
1369 shader->lds_size = G_00B84C_LDS_SIZE(value);
1370 break;
1371 case R_0286CC_SPI_PS_INPUT_ENA:
1372 shader->spi_ps_input_ena = value;
1373 break;
1374 default:
1375 fprintf(stderr, "Warning: Compiler emitted unknown "
1376 "config register: 0x%x\n", reg);
1377 break;
1378 }
1379 }
1380
1381 /* copy new shader */
1382 si_resource_reference(&shader->bo, NULL);
1383 shader->bo = si_resource_create_custom(rctx->context.screen, PIPE_USAGE_IMMUTABLE,
1384 binary.code_size);
1385 if (shader->bo == NULL) {
1386 return -ENOMEM;
1387 }
1388
1389 ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
1390 if (0 /*R600_BIG_ENDIAN*/) {
1391 for (i = 0; i < binary.code_size / 4; ++i) {
1392 ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
1393 }
1394 } else {
1395 memcpy(ptr, binary.code, binary.code_size);
1396 }
1397 rctx->ws->buffer_unmap(shader->bo->cs_buf);
1398
1399 free(binary.code);
1400 free(binary.config);
1401
1402 return 0;
1403 }
1404
/* Translate a TGSI shader into SI machine code.
 *
 * Pipeline: scan the TGSI tokens, register per-opcode emit actions,
 * build the LLVM function skeleton, preload constants/samplers,
 * translate TGSI->LLVM, then compile and upload via si_compile_llvm().
 *
 * Returns 0 on success, -EINVAL on translation failure, or the error
 * code from si_compile_llvm().
 */
int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	bool dump;
	int r = 0;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	/* Caller must pass a freshly-reset shader struct. */
	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);

	shader->shader.uses_kill = shader_info.uses_kill;
	shader->shader.uses_instanceid = shader_info.uses_instanceid;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	/* Hook up the texture opcode actions.  TXP shares tex_action;
	 * the projection divide is handled in tex_fetch_args(). */
	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
#if HAVE_LLVM >= 0x0304
	bld_base->op_actions[TGSI_OPCODE_TXD] = txd_action;
#endif
	bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;

#if HAVE_LLVM >= 0x0304
	/* DDX/DDY are emulated through LDS; needs LLVM >= 3.4. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
#endif

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;

	create_meta_data(&si_shader_ctx);
	create_function(&si_shader_ctx);
	preload_constants(&si_shader_ctx);
	preload_samplers(&si_shader_ctx);

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		FREE(si_shader_ctx.constants);
		FREE(si_shader_ctx.resources);
		FREE(si_shader_ctx.samplers);
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	r = si_compile_llvm(rctx, shader, mod);

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	FREE(si_shader_ctx.constants);
	FREE(si_shader_ctx.resources);
	FREE(si_shader_ctx.samplers);

	return r;
}
1493
/* Release the GPU buffer holding the compiled shader code.
 * si_resource_reference(..., NULL) drops our reference and NULLs
 * the pointer; the other shader fields have no owned resources. */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}