radeonsi: Remove incorrect (and dead) assignment in tex_fetch_args().
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	struct si_shader_key key;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;
	/* struct list_head inputs; */
	/* unsigned * input_mappings *//* From TGSI to SI hw */
	/* struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8

enum sgpr_type {
	SGPR_CONST_PTR_F32,
	SGPR_CONST_PTR_V4I32,
	SGPR_CONST_PTR_V8I32,
	SGPR_I32,
	SGPR_I64
};

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The number of elements to offset by, not the number of
 * bytes or dwords. An element is the type pointed to by the base_ptr
 * parameter (e.g. int is the element of an int* pointer).
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 */
static LLVMValueRef build_indexed_load(
	struct gallivm_state * gallivm,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		gallivm->builder, base_ptr, &offset, 1, "");

	return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
}

/**
 * Load a value stored in one of the user SGPRs
 *
 * @param sgpr This is the sgpr to load the value from. If you need to load a
 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
 * then you should pass the index of the first SGPR that holds the value. For
 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
 * pass 2 for the sgpr parameter.
 *
 * The sgpr parameter must also be aligned to the dword width of the type
 * being loaded, i.e. it must be divisible by the number of dwords the type
 * occupies. For example, if the value being loaded is two dwords wide, then
 * the sgpr parameter must be divisible by two.
 */
static LLVMValueRef use_sgpr(
	struct gallivm_state * gallivm,
	enum sgpr_type type,
	unsigned sgpr)
{
	LLVMValueRef sgpr_index;
	LLVMTypeRef ret_type;
	LLVMValueRef ptr;

	sgpr_index = lp_build_const_int32(gallivm, sgpr);

	switch (type) {
	case SGPR_CONST_PTR_F32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMFloatTypeInContext(gallivm->context);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_I32:
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		break;

	case SGPR_I64:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt64TypeInContext(gallivm->context);
		break;

	case SGPR_CONST_PTR_V4I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 4);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_CONST_PTR_V8I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 8);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	default:
		assert(!"Unsupported SGPR type in use_sgpr()");
		return NULL;
	}

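	/* Turn the SGPR index into a pointer in the USER_SGPR address space;
	 * the backend is expected to lower this load to a direct read of the
	 * corresponding user SGPR(s). */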
	ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
	ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
	return LLVMBuildLoad(gallivm->builder, ptr, "");
}

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for vertex shaders */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);

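	/* llvm.SI.vs.load.input arguments: the vertex buffer resource
	 * descriptor, the constant attribute offset within the vertex, and
	 * the per-vertex buffer index from VGPR0. */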
	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}

static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *                      quad begins a new primitive. Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR);
	LLVMValueRef attr_number;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[1];
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = lp_build_const_int32(gallivm, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.fs.read.pos", input_type,
						args, 1, LLVMReadNoneAttribute);
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
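		/* Fall through to linear interpolation as a workaround. */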
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.wqm",
				   LLVMVoidTypeInContext(gallivm->context),
				   NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4)? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		LLVMValueRef args[3];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, 3, LLVMReadOnlyAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

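		/* Count the extra interpolated parameter used for the back color. */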
		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[3];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned idx;

	LLVMValueRef const_ptr;
	LLVMValueRef offset;
	LLVMValueRef load;

	/* Indirect addressing is currently not supported. */
	if (reg->Register.Indirect) {
		assert(0);
		load = lp_build_const_int32(base->gallivm, 0);
		return bitcast(bld_base, type, load);
	}

	const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST);

	/* XXX: This assumes that the constant buffer is not packed, so
	 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
	 * offset of 4. */
	idx = (reg->Register.Index * 4) + swizzle;

	/* Index loads above 255 are currently not supported. */
	if (idx > 255) {
		assert(0);
		idx = 0;
	}
	offset = lp_build_const_int32(base->gallivm, idx);

	load = build_indexed_load(base->gallivm, const_ptr, offset);
	return bitcast(bld_base, type, load);
}

/* Initialize arguments for the shader export intrinsic */
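/* The nine export arguments filled in below are: the enabled-channel mask,
 * the valid-mask flag, the done flag, the export target, the COMPR flag,
 * and the four channel values. */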
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			struct r600_context *rctx = si_shader_ctx->rctx;
			compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}

static void si_llvm_emit_prologue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
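	/* Tell the backend which shader stage this module implements. */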
	lp_build_intrinsic_unary(gallivm->builder,
				 "llvm.AMDGPU.shader.type",
				 LLVMVoidTypeInContext(gallivm->context),
				 lp_build_const_int32(gallivm, si_shader_ctx->type));
}


static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->key.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
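		/* PIPE_FUNC_NEVER rejects every fragment, so kill unconditionally. */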
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
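			/* Note: the BCOLOR case label below sits inside the
			 * vertex-shader branch of the if statement, so back
			 * colors always take the parameter-export path. */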
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}
		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

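		/* For the MRTZ target, depth goes in the first value slot and
		 * stencil in the second; the mask built below marks which
		 * slots hold valid data. */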
		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
	/* ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]); */
}

static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef ptr;
	LLVMValueRef offset;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	unsigned count = 0;
	unsigned chan;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

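	/* Pack the address components in the order the hardware expects:
	 * LOD bias, depth comparison value, coordinates, array slice, then
	 * explicit LOD. */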
	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if ((target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE) &&
	    opcode != TGSI_OPCODE_TXQ)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
		address[count++] = coords[2];
		break;
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[3];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	switch (target) {
	case TGSI_TEXTURE_2D:
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_RECT:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_2D_MSAA:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[2];
	}

	/* Pack array slice */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Pad the address vector to a power-of-two size; the sample
	 * intrinsics are overloaded by vector width. */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[1] = lp_build_gather_values(gallivm, address, count);

	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, SI_SGPR_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm, target);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
		LLVMFloatTypeInContext(bld_base->base.gallivm->context),
		4);
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	char intr_name[23];

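	/* Append the address vector width to the base intrinsic name,
	 * e.g. "llvm.SI.sample." becomes "llvm.SI.sample.v4i32". */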
	sprintf(intr_name, "%sv%ui32", action->intr_name,
		LLVMGetVectorSize(LLVMTypeOf(emit_data->args[1])));

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
		base->gallivm->builder, intr_name, emit_data->dst_type,
		emit_data->args, emit_data->arg_count);
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};


int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader,
	struct si_shader_key key)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	if (shader_info.indirect_files != 0) {
		fprintf(stderr, "Indirect addressing not fully handled yet\n");
		return -ENOSYS;
	}

	shader->shader.uses_kill = shader_info.uses_kill;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_prologue = si_llvm_emit_prologue;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
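	/* TXP reuses tex_action: the projective divide is applied in
	 * tex_fetch_args before the coordinates are packed. */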

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.key = key;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}

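	/* The first three dwords of the compiled binary hold the config
	 * (SGPR count, VGPR count and SPI_PS_INPUT_ENA); the shader code
	 * itself starts at byte 12. */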
	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* Copy the new shader code into a buffer object. */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
					       inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count - 12) / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes + 12 + i * 4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}