radeonsi: support constants as TEX coordinates
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 /*
3 * Copyright 2012 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Tom Stellard <thomas.stellard@amd.com>
26 * Michel Dänzer <michel.daenzer@amd.com>
27 * Christian König <christian.koenig@amd.com>
28 */
29
30 #include "gallivm/lp_bld_tgsi_action.h"
31 #include "gallivm/lp_bld_const.h"
32 #include "gallivm/lp_bld_gather.h"
33 #include "gallivm/lp_bld_intr.h"
34 #include "gallivm/lp_bld_logic.h"
35 #include "gallivm/lp_bld_tgsi.h"
36 #include "radeon_llvm.h"
37 #include "radeon_llvm_emit.h"
38 #include "tgsi/tgsi_info.h"
39 #include "tgsi/tgsi_parse.h"
40 #include "tgsi/tgsi_scan.h"
41 #include "tgsi/tgsi_dump.h"
42
43 #include "radeonsi_pipe.h"
44 #include "radeonsi_shader.h"
45 #include "si_state.h"
46 #include "sid.h"
47
48 #include <assert.h>
49 #include <errno.h>
50 #include <stdio.h>
51
/**
 * Per-compilation state for translating one TGSI shader into LLVM IR
 * for the SI target.
 */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld; /* must be first: si_shader_context() casts bld_base back to this struct */
	struct r600_context *rctx;             /* pipe context that requested the compile */
	struct tgsi_parse_context parse;       /* token stream walker, reused in the epilogue */
	struct tgsi_token * tokens;            /* TGSI tokens of the shader being compiled */
	struct si_pipe_shader *shader;         /* destination for the compiled shader metadata */
	struct si_shader_key key;              /* state-dependent compile options (alpha test, two-side, ...) */
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;               /* number of interpolated FS inputs emitted so far */
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};
66
/* Recover the enclosing si_shader_context from a TGSI build context.
 * This is valid because radeon_bld (and therefore its embedded bld_base)
 * is the first member of struct si_shader_context. */
static struct si_shader_context *si_shader_context(struct lp_build_tgsi_context *bld_base)
{
	void *ctx = bld_base;

	return (struct si_shader_context *)ctx;
}
72
73
/* Bases/offsets for selecting an interpolation mode slot. */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4 /* NOTE(review): spelled "OFSET"; presumably CENTROID_OFFSET — unused here, confirm before renaming */

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address spaces recognized by the SI backend. */
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8
84
/* Kinds of values that can be loaded from user SGPRs; see use_sgpr(). */
enum sgpr_type {
	SGPR_CONST_PTR_F32,   /* pointer to float constants (constant buffer) */
	SGPR_CONST_PTR_V4I32, /* pointer to <4 x i32> (buffer/sampler descriptors) */
	SGPR_CONST_PTR_V8I32, /* pointer to <8 x i32> (texture resource descriptors) */
	SGPR_I32,             /* a single 32-bit integer */
	SGPR_I64              /* a 64-bit integer (occupies two consecutive SGPRs) */
};
92
93 /**
94 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
95 *
96 * @param offset The offset parameter specifies the number of
97 * elements to offset, not the number of bytes or dwords. An element is the
98 * the type pointed to by the base_ptr parameter (e.g. int is the element of
99 * an int* pointer)
100 *
101 * When LLVM lowers the load instruction, it will convert the element offset
102 * into a dword offset automatically.
103 *
104 */
105 static LLVMValueRef build_indexed_load(
106 struct gallivm_state * gallivm,
107 LLVMValueRef base_ptr,
108 LLVMValueRef offset)
109 {
110 LLVMValueRef computed_ptr = LLVMBuildGEP(
111 gallivm->builder, base_ptr, &offset, 1, "");
112
113 return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
114 }
115
116 /**
117 * Load a value stored in one of the user SGPRs
118 *
119 * @param sgpr This is the sgpr to load the value from. If you need to load a
120 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
121 * then you should pass the index of the first SGPR that holds the value. For
122 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
123 * use pass 2 for the sgpr parameter.
124 *
125 * The value of the sgpr parameter must also be aligned to the width of the type
126 * being loaded, so that the sgpr parameter is divisible by the dword width of the
127 * type. For example, if the value being loaded is two dwords wide, then the sgpr
128 * parameter must be divisible by two.
129 */
130 static LLVMValueRef use_sgpr(
131 struct gallivm_state * gallivm,
132 enum sgpr_type type,
133 unsigned sgpr)
134 {
135 LLVMValueRef sgpr_index;
136 LLVMTypeRef ret_type;
137 LLVMValueRef ptr;
138
139 sgpr_index = lp_build_const_int32(gallivm, sgpr);
140
141 switch (type) {
142 case SGPR_CONST_PTR_F32:
143 assert(sgpr % 2 == 0);
144 ret_type = LLVMFloatTypeInContext(gallivm->context);
145 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
146 break;
147
148 case SGPR_I32:
149 ret_type = LLVMInt32TypeInContext(gallivm->context);
150 break;
151
152 case SGPR_I64:
153 assert(sgpr % 2 == 0);
154 ret_type= LLVMInt64TypeInContext(gallivm->context);
155 break;
156
157 case SGPR_CONST_PTR_V4I32:
158 assert(sgpr % 2 == 0);
159 ret_type = LLVMInt32TypeInContext(gallivm->context);
160 ret_type = LLVMVectorType(ret_type, 4);
161 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
162 break;
163
164 case SGPR_CONST_PTR_V8I32:
165 assert(sgpr % 2 == 0);
166 ret_type = LLVMInt32TypeInContext(gallivm->context);
167 ret_type = LLVMVectorType(ret_type, 8);
168 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
169 break;
170
171 default:
172 assert(!"Unsupported SGPR type in use_sgpr()");
173 return NULL;
174 }
175
176 ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
177 ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
178 return LLVMBuildLoad(gallivm->builder, ptr, "");
179 }
180
/**
 * Declare a vertex shader input: fetch the vec4 attribute value for
 * input_index with llvm.SI.vs.load.input and store each of its four
 * channels into radeon_bld.inputs at the corresponding SoA slot.
 */
static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list (vertex buffer resource descriptors), one
	 * descriptor per vertex attribute. */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for vertex shaders. */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
				LLVMBuildExtractElement(base->gallivm->builder,
				input, llvm_chan, "");
	}
}
232
/**
 * Declare a fragment shader input and emit the code that produces its
 * per-channel values into radeon_bld.inputs.
 *
 * POSITION is read with llvm.SI.fs.read.pos, FACE is synthesized as
 * 1.0/0.0 from llvm.SI.fs.read.face, and everything else is interpolated
 * with one of the llvm.SI.fs.interp.* intrinsics chosen from the TGSI
 * interpolation mode.  Two-sided color inputs select between the front
 * and back attributes based on facedness.
 */
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);

	/* This value is:
	 * [15:0] NewPrimMask (Bit mask for each quad.  It is set it the
	 *                     quad begins a new primitive.  Bit 0 always needs
	 *                     to be unset)
	 * [32:16] ParamOffset
	 *
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR);
	LLVMValueRef attr_number;

	/* Fragment position: read each channel directly, no interpolation. */
	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[1];
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = lp_build_const_int32(gallivm, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder,
					"llvm.SI.fs.read.pos", input_type,
					args, 1, LLVMReadNoneAttribute);
		}
		return;
	}

	/* Face input: expose it as (is_front ? 1.0 : 0.0, 0, 0, 1). */
	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	/* All remaining inputs are interpolated parameters; assign the next
	 * parameter slot to this input. */
	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
		/* fallthrough to LINEAR while the constant path is disabled */
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.wqm",
				   LLVMVoidTypeInContext(gallivm->context),
				   NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		/* Two-sided lighting: interpolate both the front attribute
		 * and the back attribute (at param_offset + 1) and select
		 * per channel based on facedness. */
		LLVMValueRef args[3];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, 3, LLVMReadOnlyAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		/* The back color consumes an extra interpolated parameter. */
		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[3];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);
		}
	}
}
406
407 static void declare_input(
408 struct radeon_llvm_context * radeon_bld,
409 unsigned input_index,
410 const struct tgsi_full_declaration *decl)
411 {
412 struct si_shader_context * si_shader_ctx =
413 si_shader_context(&radeon_bld->soa.bld_base);
414 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
415 declare_input_vs(si_shader_ctx, input_index, decl);
416 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
417 declare_input_fs(si_shader_ctx, input_index, decl);
418 } else {
419 fprintf(stderr, "Warning: Unsupported shader type,\n");
420 }
421 }
422
423 static LLVMValueRef fetch_constant(
424 struct lp_build_tgsi_context * bld_base,
425 const struct tgsi_full_src_register *reg,
426 enum tgsi_opcode_type type,
427 unsigned swizzle)
428 {
429 struct lp_build_context * base = &bld_base->base;
430 unsigned idx;
431
432 LLVMValueRef const_ptr;
433 LLVMValueRef offset;
434 LLVMValueRef load;
435
436 if (swizzle == LP_CHAN_ALL) {
437 unsigned chan;
438 LLVMValueRef values[4];
439 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
440 values[chan] = fetch_constant(bld_base, reg, type, chan);
441
442 return lp_build_gather_values(bld_base->base.gallivm, values, 4);
443 }
444
445 /* currently not supported */
446 if (reg->Register.Indirect) {
447 assert(0);
448 load = lp_build_const_int32(base->gallivm, 0);
449 return bitcast(bld_base, type, load);
450 }
451
452 const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST);
453
454 /* XXX: This assumes that the constant buffer is not packed, so
455 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
456 * offset of 4. */
457 idx = (reg->Register.Index * 4) + swizzle;
458
459 /* index loads above 255 are currently not supported */
460 if (idx > 255) {
461 assert(0);
462 idx = 0;
463 }
464 offset = lp_build_const_int32(base->gallivm, idx);
465
466 load = build_indexed_load(base->gallivm, const_ptr, offset);
467 return bitcast(bld_base, type, load);
468 }
469
470 /* Initialize arguments for the shader export intrinsic */
471 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
472 struct tgsi_full_declaration *d,
473 unsigned index,
474 unsigned target,
475 LLVMValueRef *args)
476 {
477 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
478 struct lp_build_context *uint =
479 &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
480 struct lp_build_context *base = &bld_base->base;
481 unsigned compressed = 0;
482 unsigned chan;
483
484 if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
485 int cbuf = target - V_008DFC_SQ_EXP_MRT;
486
487 if (cbuf >= 0 && cbuf < 8) {
488 struct r600_context *rctx = si_shader_ctx->rctx;
489 compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;
490
491 if (compressed)
492 si_shader_ctx->shader->spi_shader_col_format |=
493 V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
494 else
495 si_shader_ctx->shader->spi_shader_col_format |=
496 V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
497 }
498 }
499
500 if (compressed) {
501 /* Pixel shader needs to pack output values before export */
502 for (chan = 0; chan < 2; chan++ ) {
503 LLVMValueRef *out_ptr =
504 si_shader_ctx->radeon_bld.soa.outputs[index];
505 args[0] = LLVMBuildLoad(base->gallivm->builder,
506 out_ptr[2 * chan], "");
507 args[1] = LLVMBuildLoad(base->gallivm->builder,
508 out_ptr[2 * chan + 1], "");
509 args[chan + 5] =
510 build_intrinsic(base->gallivm->builder,
511 "llvm.SI.packf16",
512 LLVMInt32TypeInContext(base->gallivm->context),
513 args, 2,
514 LLVMReadNoneAttribute);
515 args[chan + 7] = args[chan + 5] =
516 LLVMBuildBitCast(base->gallivm->builder,
517 args[chan + 5],
518 LLVMFloatTypeInContext(base->gallivm->context),
519 "");
520 }
521
522 /* Set COMPR flag */
523 args[4] = uint->one;
524 } else {
525 for (chan = 0; chan < 4; chan++ ) {
526 LLVMValueRef out_ptr =
527 si_shader_ctx->radeon_bld.soa.outputs[index][chan];
528 /* +5 because the first output value will be
529 * the 6th argument to the intrinsic. */
530 args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
531 out_ptr, "");
532 }
533
534 /* Clear COMPR flag */
535 args[4] = uint->zero;
536 }
537
538 /* XXX: This controls which components of the output
539 * registers actually get exported. (e.g bit 0 means export
540 * X component, bit 1 means export Y component, etc.) I'm
541 * hard coding this to 0xf for now. In the future, we might
542 * want to do something else. */
543 args[0] = lp_build_const_int32(base->gallivm, 0xf);
544
545 /* Specify whether the EXEC mask represents the valid mask */
546 args[1] = uint->zero;
547
548 /* Specify whether this is the last export */
549 args[2] = uint->zero;
550
551 /* Specify the target we are exporting */
552 args[3] = lp_build_const_int32(base->gallivm, target);
553
554 /* XXX: We probably need to keep track of the output
555 * values, so we know what we are passing to the next
556 * stage. */
557 }
558
559 static void si_llvm_emit_prologue(struct lp_build_tgsi_context *bld_base)
560 {
561 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
562 struct gallivm_state *gallivm = bld_base->base.gallivm;
563 lp_build_intrinsic_unary(gallivm->builder,
564 "llvm.AMDGPU.shader.type",
565 LLVMVoidTypeInContext(gallivm->context),
566 lp_build_const_int32(gallivm, si_shader_ctx->type));
567 }
568
569
570 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
571 unsigned index)
572 {
573 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
574 struct gallivm_state *gallivm = bld_base->base.gallivm;
575
576 if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
577 LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
578 LLVMValueRef alpha_pass =
579 lp_build_cmp(&bld_base->base,
580 si_shader_ctx->key.alpha_func,
581 LLVMBuildLoad(gallivm->builder, out_ptr, ""),
582 lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
583 LLVMValueRef arg =
584 lp_build_select(&bld_base->base,
585 alpha_pass,
586 lp_build_const_float(gallivm, 1.0f),
587 lp_build_const_float(gallivm, -1.0f));
588
589 build_intrinsic(gallivm->builder,
590 "llvm.AMDGPU.kill",
591 LLVMVoidTypeInContext(gallivm->context),
592 &arg, 1, 0);
593 } else {
594 build_intrinsic(gallivm->builder,
595 "llvm.AMDGPU.kilp",
596 LLVMVoidTypeInContext(gallivm->context),
597 NULL, 0, 0);
598 }
599 }
600
/* XXX: This is partially implemented for VS only at this point. It is not complete */
/**
 * Shader epilogue: walk the TGSI declaration tokens again, record
 * input/output metadata into shader->shader, and emit one llvm.SI.export
 * per output.  The final export is held back in last_args so it can be
 * flagged as "done"; depth/stencil outputs are merged into a single MRTZ
 * export.
 */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };	/* deferred "last" export, if any */
	unsigned color_count = 0;		/* MRT exports emitted so far */
	unsigned param_count = 0;		/* PARAM exports emitted so far */
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			/* Inputs are only recorded for state setup; nothing
			 * is exported for them. */
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					/* FS position output is depth; it is
					 * exported later via MRTZ. */
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				/* Deliberate case-label-inside-if: for a VS,
				 * both COLOR and BCOLOR fall into the PARAM
				 * branch; a BCOLOR declaration jumps directly
				 * into the if-body. For an FS, COLOR takes
				 * the MRT branch below. */
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					/* The alpha test applies to the first
					 * color output only. */
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			/* Hold back the export that must be marked "last":
			 * POSITION for a VS, COLOR for an FS. */
			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		/* The MRTZ export may itself become the deferred last export. */
		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		/* No exports at all: emit a dummy MRT export so the hardware
		 * still sees a "last export". Only valid for an FS. */
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5]= uint->zero;
		last_args[6]= uint->zero;
		last_args[7]= uint->zero;
		last_args[8]= uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
/*		ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
798
/**
 * Build the argument list for the llvm.SI.sample* intrinsics:
 *
 * args[0] = component write mask (hard-coded 0xf for now)
 * args[1] = packed address vector: [bias] [compare] coords [slice] [lod],
 *           padded with undef to a power-of-two width
 * args[2] = texture resource descriptor (v8i32)
 * args[3] = sampler state (v4i32)
 * args[4] = texture target (TGSI_TEXTURE_*)
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef ptr;
	LLVMValueRef offset;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	unsigned count = 0;
	unsigned chan;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask*/
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		/* TXP divides x/y/z by w before sampling. */
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	/* Cube coordinates need face selection/major-axis projection before
	 * they can be packed (not needed for TXQ, which only queries size). */
	if ((target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE) &&
	    opcode != TGSI_OPCODE_TXQ)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
		address[count++] = coords[2];
		break;
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[3];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		/* Shadow cube arrays carry the reference value in the second
		 * source register. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	switch (target) {
	case TGSI_TEXTURE_2D:
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_RECT:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_2D_MSAA:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[2];
	}

	/* Pack array slice */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The sample intrinsics take an integer vector; bitcast every element. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[1] = lp_build_gather_values(gallivm, address, count);

	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, SI_SGPR_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm, target);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);
}
949
950 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
951 struct lp_build_tgsi_context * bld_base,
952 struct lp_build_emit_data * emit_data)
953 {
954 struct lp_build_context * base = &bld_base->base;
955 char intr_name[23];
956
957 sprintf(intr_name, "%sv%ui32", action->intr_name,
958 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[1])));
959
960 emit_data->output[emit_data->chan] = lp_build_intrinsic(
961 base->gallivm->builder, intr_name, emit_data->dst_type,
962 emit_data->args, emit_data->arg_count);
963 }
964
/* TEX/TXP: plain sample (TXP's projection is handled in tex_fetch_args). */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

/* TXB: sample with LOD bias packed as the first address component. */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

/* TXL: sample with explicit LOD packed as the last address component. */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};
982
983
984 int si_pipe_shader_create(
985 struct pipe_context *ctx,
986 struct si_pipe_shader *shader,
987 struct si_shader_key key)
988 {
989 struct r600_context *rctx = (struct r600_context*)ctx;
990 struct si_pipe_shader_selector *sel = shader->selector;
991 struct si_shader_context si_shader_ctx;
992 struct tgsi_shader_info shader_info;
993 struct lp_build_tgsi_context * bld_base;
994 LLVMModuleRef mod;
995 unsigned char * inst_bytes;
996 unsigned inst_byte_count;
997 unsigned i;
998 uint32_t *ptr;
999 bool dump;
1000
1001 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1002
1003 assert(shader->shader.noutput == 0);
1004 assert(shader->shader.ninterp == 0);
1005 assert(shader->shader.ninput == 0);
1006
1007 memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
1008 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
1009 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
1010
1011 tgsi_scan_shader(sel->tokens, &shader_info);
1012 if (shader_info.indirect_files != 0) {
1013 fprintf(stderr, "Indirect addressing not fully handled yet\n");
1014 return -ENOSYS;
1015 }
1016
1017 shader->shader.uses_kill = shader_info.uses_kill;
1018 bld_base->info = &shader_info;
1019 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
1020 bld_base->emit_prologue = si_llvm_emit_prologue;
1021 bld_base->emit_epilogue = si_llvm_emit_epilogue;
1022
1023 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
1024 bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
1025 bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
1026 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
1027
1028 si_shader_ctx.radeon_bld.load_input = declare_input;
1029 si_shader_ctx.tokens = sel->tokens;
1030 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
1031 si_shader_ctx.shader = shader;
1032 si_shader_ctx.key = key;
1033 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
1034 si_shader_ctx.rctx = rctx;
1035
1036 shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;
1037
1038 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
1039 * conversion fails. */
1040 if (dump) {
1041 tgsi_dump(sel->tokens, 0);
1042 }
1043
1044 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
1045 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
1046 return -EINVAL;
1047 }
1048
1049 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
1050
1051 mod = bld_base->base.gallivm->module;
1052 if (dump) {
1053 LLVMDumpModule(mod);
1054 }
1055 radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
1056 if (dump) {
1057 fprintf(stderr, "SI CODE:\n");
1058 for (i = 0; i < inst_byte_count; i+=4 ) {
1059 fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
1060 inst_bytes[i + 2], inst_bytes[i + 1],
1061 inst_bytes[i]);
1062 }
1063 }
1064
1065 shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
1066 shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
1067 shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));
1068
1069 radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
1070 tgsi_parse_free(&si_shader_ctx.parse);
1071
1072 /* copy new shader */
1073 si_resource_reference(&shader->bo, NULL);
1074 shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
1075 inst_byte_count - 12);
1076 if (shader->bo == NULL) {
1077 return -ENOMEM;
1078 }
1079
1080 ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
1081 if (0 /*R600_BIG_ENDIAN*/) {
1082 for (i = 0; i < (inst_byte_count-12)/4; ++i) {
1083 ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes+12 + i*4));
1084 }
1085 } else {
1086 memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
1087 }
1088 rctx->ws->buffer_unmap(shader->bo->cs_buf);
1089
1090 free(inst_bytes);
1091
1092 return 0;
1093 }
1094
/* Release the GPU code buffer owned by this shader; the shader struct
 * itself is freed by the caller. */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}