radeonsi: Handle TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
[mesa.git] src/gallium/drivers/radeonsi/radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	struct si_shader_key key;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8

enum sgpr_type {
	SGPR_CONST_PTR_F32,
	SGPR_CONST_PTR_V4I32,
	SGPR_CONST_PTR_V8I32,
	SGPR_I32,
	SGPR_I64
};
/**
 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is
 * the type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct gallivm_state * gallivm,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		gallivm->builder, base_ptr, &offset, 1, "");

	return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
}
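
/* A quick illustration of the element-offset semantics documented above
 * (hypothetical values, not taken from the driver): with a base_ptr of type
 * <4 x i32> addrspace(2)*, an offset of 1 advances the pointer by one whole
 * vec4 (16 bytes / 4 dwords), not by one dword.
 */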

/**
 * Load a value stored in one of the user SGPRs
 *
 * @param sgpr This is the sgpr to load the value from. If you need to load a
 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
 * then you should pass the index of the first SGPR that holds the value. For
 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
 * pass 2 for the sgpr parameter.
 *
 * The value of the sgpr parameter must also be aligned to the width of the type
 * being loaded, so that the sgpr parameter is divisible by the dword width of the
 * type. For example, if the value being loaded is two dwords wide, then the sgpr
 * parameter must be divisible by two.
 */
static LLVMValueRef use_sgpr(
	struct gallivm_state * gallivm,
	enum sgpr_type type,
	unsigned sgpr)
{
	LLVMValueRef sgpr_index;
	LLVMTypeRef ret_type;
	LLVMValueRef ptr;

	sgpr_index = lp_build_const_int32(gallivm, sgpr);

	switch (type) {
	case SGPR_CONST_PTR_F32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMFloatTypeInContext(gallivm->context);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_I32:
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		break;

	case SGPR_I64:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt64TypeInContext(gallivm->context);
		break;

	case SGPR_CONST_PTR_V4I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 4);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_CONST_PTR_V8I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 8);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	default:
		assert(!"Unsupported SGPR type in use_sgpr()");
		return NULL;
	}

	ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
	ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
	return LLVMBuildLoad(gallivm->builder, ptr, "");
}
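
/* Note the trick above: the SGPR index itself is cast to a pointer in the
 * special USER_SGPR_ADDR_SPACE (8) and then loaded. The LLVM backend is
 * presumably expected to recognize loads from this address space and lower
 * them to plain reads of the corresponding user SGPR(s) rather than memory
 * accesses; e.g. use_sgpr(gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST) in
 * fetch_constant() below fetches the constant-buffer pointer held in a pair
 * of user SGPRs.
 */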

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);
	/* Load the buffer index, which is always stored in VGPR0
	 * for vertex shaders */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);
	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}
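
/* For reference, the sequence above conceptually emits IR along these lines
 * (illustrative only, value names invented):
 *
 *   %vb    = <4 x i32> vertex buffer descriptor loaded from the T list
 *   %index = call i32 @llvm.SI.vs.load.buffer.index()
 *   %attr  = call <4 x float> @llvm.SI.vs.load.input(%vb, i32 0, %index)
 *
 * and then scatters the four lanes of %attr into the SoA input slots.
 */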

static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *                      quad begins a new primitive. Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR);
	LLVMValueRef attr_number;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[1];
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = lp_build_const_int32(gallivm, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder,
					"llvm.SI.fs.read.pos", input_type,
					args, 1, LLVMReadNoneAttribute);
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);
	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
		/* fall through to linear interpolation as a workaround */
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.wqm",
				   LLVMVoidTypeInContext(gallivm->context),
				   NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4)? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		LLVMValueRef args[3];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, 3, LLVMReadOnlyAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[3];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);
		}
	}
}
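
/* Two-sided color note: when key.color_two_side is set, the front color is
 * interpolated from this input's parameter slot and the back color from the
 * very next slot (param_offset + 1); the per-pixel face sign read via
 * llvm.SI.fs.read.face then selects between them, which is why ninterp is
 * bumped a second time for the extra interpolated parameter.
 */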

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
				si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned idx;

	LLVMValueRef const_ptr;
	LLVMValueRef offset;
	LLVMValueRef load;

	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	/* currently not supported */
	if (reg->Register.Indirect) {
		assert(0);
		load = lp_build_const_int32(base->gallivm, 0);
		return bitcast(bld_base, type, load);
	}

	const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST);

	/* XXX: This assumes that the constant buffer is not packed, so
	 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
	 * offset of 4. */
	idx = (reg->Register.Index * 4) + swizzle;
	offset = lp_build_const_int32(base->gallivm, idx);

	load = build_indexed_load(base->gallivm, const_ptr, offset);
	return bitcast(bld_base, type, load);
}
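
/* Worked example of the unpacked layout assumed above: CONST[2].y maps to
 * element offset 2 * 4 + 1 = 9 into the constant buffer, since every
 * constant register occupies a full vec4 regardless of how many components
 * the shader actually uses.
 */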

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			struct r600_context *rctx = si_shader_ctx->rctx;
			compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}
	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
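
/* For reference, the nine llvm.SI.export arguments as assembled above are:
 *   args[0]    - component write mask
 *   args[1]    - whether the EXEC mask represents the valid mask
 *   args[2]    - "done" flag marking the last export
 *   args[3]    - export target (MRT/MRTZ/POS/PARAM)
 *   args[4]    - COMPR flag (1 = packed fp16 pairs)
 *   args[5..8] - the four export values
 */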

static void si_llvm_emit_prologue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	lp_build_intrinsic_unary(gallivm->builder,
				 "llvm.AMDGPU.shader.type",
				 LLVMVoidTypeInContext(gallivm->context),
				 lp_build_const_int32(gallivm, si_shader_ctx->type));
}


static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->key.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}
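
/* The select above feeds llvm.AMDGPU.kill +1.0 for fragments that pass the
 * alpha test and -1.0 for those that fail; the kill intrinsic discards a
 * fragment when its argument is negative. PIPE_FUNC_NEVER takes the
 * unconditional-kill path via llvm.AMDGPU.kilp instead.
 */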

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
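			/* Note the Duff's-device-style construct below: the
			 * BCOLOR case label sits inside the if body, so back
			 * colors always take the VS parameter-export path,
			 * while COLOR only does so for vertex shaders. */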
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

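	/* TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS: if the shader only
	 * writes COLOR0 but more than one color buffer is bound, replay the
	 * pending color export once per extra MRT below and replicate the
	 * export format nibble in spi_shader_col_format for each of them. */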
	if (shader->fs_write_all && shader->nr_cbufs > 1) {
		int i;

		/* Specify that this is not yet the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 0);

		for (i = 1; i < shader->nr_cbufs; i++) {
			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm,
							    V_008DFC_SQ_EXP_MRT + i);

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   last_args, 9);

			si_shader_ctx->shader->spi_shader_col_format |=
				si_shader_ctx->shader->spi_shader_col_format << 4;
		}

		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
	}

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
/*	ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}

static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef ptr;
	LLVMValueRef offset;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	unsigned count = 0;
	unsigned chan;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if ((target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE) &&
	    opcode != TGSI_OPCODE_TXQ)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
		address[count++] = coords[2];
		break;
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[3];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	switch (target) {
	case TGSI_TEXTURE_2D:
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_RECT:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_2D_MSAA:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[2];
	}

	/* Pack array slice */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[1] = lp_build_gather_values(gallivm, address, count);

	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, SI_SGPR_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm, target);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
		LLVMFloatTypeInContext(bld_base->base.gallivm->context),
		4);
}
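
/* To summarize, the address vector built above packs, in order: the LOD
 * bias (TXB), the depth-comparison value (shadow targets), the texture
 * coordinates, the array slice, and finally the explicit LOD (TXL). For
 * example, a SHADOW2D sample packs {compare, x, y} and is then padded out
 * to a 4-lane vector with undefs.
 */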

static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	char intr_name[23];

	sprintf(intr_name, "%sv%ui32", action->intr_name,
		LLVMGetVectorSize(LLVMTypeOf(emit_data->args[1])));

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
		base->gallivm->builder, intr_name, emit_data->dst_type,
		emit_data->args, emit_data->arg_count);
}
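
/* The intrinsic name is the action prefix plus the address vector type, so
 * a plain 2D TEX (two packed coordinates) becomes "llvm.SI.sample.v2i32",
 * while a 2D TXB, whose address vector also carries the LOD bias and is
 * padded to four lanes, becomes "llvm.SI.sampleb.v4i32".
 */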

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};


int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader,
	struct si_shader_key key)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	if (shader_info.indirect_files != 0) {
		fprintf(stderr, "Indirect addressing not fully handled yet\n");
		return -ENOSYS;
	}

	shader->shader.uses_kill = shader_info.uses_kill;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_prologue = si_llvm_emit_prologue;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.key = key;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}

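	/* The compiled binary starts with a 12-byte header of three
	 * little-endian dwords (SGPR count, VGPR count, SPI_PS_INPUT_ENA);
	 * the actual shader code follows at offset 12, which is why the
	 * buffer copies below skip the first 12 bytes. */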
	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* copy new shader */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
					       inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count - 12) / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes + 12 + i*4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}