radeonsi: Fall back to dummy pixel shader instead of trying indirect addressing.
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Tom Stellard <thomas.stellard@amd.com>
 *	Michel Dänzer <michel.daenzer@amd.com>
 *	Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>

struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	struct si_shader_key key;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8

enum sgpr_type {
	SGPR_CONST_PTR_F32,
	SGPR_CONST_PTR_V4I32,
	SGPR_CONST_PTR_V8I32,
	SGPR_I32,
	SGPR_I64
};

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords.  An element is the
 * type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer).
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct gallivm_state * gallivm,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		gallivm->builder, base_ptr, &offset, 1, "");

	return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
}
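
/* Illustrative usage, mirroring declare_input_vs() below (a sketch, not
 * extra driver code): loading the i-th vertex buffer descriptor from the
 * T# list pointer held in user SGPRs:
 *
 *	LLVMValueRef t_list_ptr = use_sgpr(gallivm, SGPR_CONST_PTR_V4I32,
 *					   SI_SGPR_VERTEX_BUFFER);
 *	LLVMValueRef t_offset = lp_build_const_int32(gallivm, i);
 *	LLVMValueRef t_list = build_indexed_load(gallivm, t_list_ptr, t_offset);
 *
 * The offset counts v4i32 elements here, because that is the pointee type.
 */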

/**
 * Load a value stored in one of the user SGPRs
 *
 * @param sgpr This is the sgpr to load the value from.  If you need to load
 * a value that is stored in consecutive SGPR registers (e.g. a 64-bit
 * pointer), then you should pass the index of the first SGPR that holds the
 * value.  For example, if you want to load a pointer that is stored in SGPRs
 * 2 and 3, then pass 2 for the sgpr parameter.
 *
 * The sgpr parameter must also be aligned to the dword width of the type
 * being loaded: for example, if the value being loaded is two dwords wide,
 * then sgpr must be divisible by two.
 */
static LLVMValueRef use_sgpr(
	struct gallivm_state * gallivm,
	enum sgpr_type type,
	unsigned sgpr)
{
	LLVMValueRef sgpr_index;
	LLVMTypeRef ret_type;
	LLVMValueRef ptr;

	sgpr_index = lp_build_const_int32(gallivm, sgpr);

	switch (type) {
	case SGPR_CONST_PTR_F32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMFloatTypeInContext(gallivm->context);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_I32:
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		break;

	case SGPR_I64:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt64TypeInContext(gallivm->context);
		break;

	case SGPR_CONST_PTR_V4I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 4);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_CONST_PTR_V8I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 8);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	default:
		assert(!"Unsupported SGPR type in use_sgpr()");
		return NULL;
	}

	ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
	ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
	return LLVMBuildLoad(gallivm->builder, ptr, "");
}
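
/* For a 64-bit value in SGPRs 2-3 (sgpr = 2), use_sgpr() emits roughly the
 * following IR (illustrative only); the backend is expected to turn the
 * address-space-8 load back into reads of the named scalar registers:
 *
 *	%ptr = inttoptr i32 2 to i64 addrspace(8)*
 *	%val = load i64 addrspace(8)* %ptr
 */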

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for Vertex Shaders */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this.  There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
				input, llvm_chan, "");
	}
}
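
/* Note on the indexing above: radeon_llvm_reg_index_soa() is defined
 * elsewhere; the assumption here (for illustration only) is that it maps a
 * TGSI register and channel to one scalar slot, e.g. index * 4 + chan, so
 * attribute 1's Y component would land in inputs[1 * 4 + 1] = inputs[5].
 */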

static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad.  It is set if the
	 *                      quad begins a new primitive.  Bit 0 always
	 *                      needs to be unset)
	 * [31:16] ParamOffset
	 *
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR);
	LLVMValueRef attr_number;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[1];
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = lp_build_const_int32(gallivm, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder,
					"llvm.SI.fs.read.pos", input_type,
					args, 1, LLVMReadNoneAttribute);
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
		/* Falls through to linear interpolation while the
		 * constant path above is compiled out. */
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.wqm",
				   LLVMVoidTypeInContext(gallivm->context),
				   NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4)? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		LLVMValueRef args[3];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, 3, LLVMReadOnlyAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[3];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
				si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned idx;

	LLVMValueRef const_ptr;
	LLVMValueRef offset;
	LLVMValueRef load;

	/* Indirect addressing is currently not supported. */
	if (reg->Register.Indirect) {
		assert(0);
		load = lp_build_const_int32(base->gallivm, 0);
		return bitcast(bld_base, type, load);
	}

	const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST);

	/* XXX: This assumes that the constant buffer is not packed, so
	 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
	 * offset of 4. */
	idx = (reg->Register.Index * 4) + swizzle;

	/* Index loads above 255 are currently not supported. */
	if (idx > 255) {
		assert(0);
		idx = 0;
	}
	offset = lp_build_const_int32(base->gallivm, idx);

	load = build_indexed_load(base->gallivm, const_ptr, offset);
	return bitcast(bld_base, type, load);
}
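
/* Worked example of the offset math above: CONST[2].y has Register.Index 2
 * and swizzle 1, so idx = (2 * 4) + 1 = 9, i.e. the tenth float of the
 * (unpacked) constant buffer.
 */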

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			struct r600_context *rctx = si_shader_ctx->rctx;
			compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.)  I'm
	 * hard coding this to 0xf for now.  In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
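
/* For reference, the nine llvm.SI.export arguments as filled in above:
 * args[0] = component write mask, args[1] = whether EXEC is the valid mask,
 * args[2] = "done" (last export) flag, args[3] = export target,
 * args[4] = COMPR flag, args[5..8] = the four output values.
 */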

static void si_llvm_emit_prologue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	lp_build_intrinsic_unary(gallivm->builder,
				 "llvm.AMDGPU.shader.type",
				 LLVMVoidTypeInContext(gallivm->context),
				 lp_build_const_int32(gallivm, si_shader_ctx->type));
}


static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->key.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}
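
/* Example of the semantics above: with alpha_func = PIPE_FUNC_GREATER and
 * alpha_ref = 0.5, a fragment with alpha 0.3 selects -1.0 as the kill
 * argument and is discarded, while one with alpha 0.7 selects 1.0 and
 * survives.  PIPE_FUNC_NEVER discards unconditionally via llvm.AMDGPU.kilp.
 */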

/* XXX: This is partially implemented for VS only at this point.  It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					/* Note: the BCOLOR label jumps into this
					 * branch of the if, so VS (back) color
					 * outputs become parameters. */
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
/*	ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
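
/* The deferral through last_args above exists because the final export must
 * carry the "done" bit: position (VS) and color (PS) exports are buffered so
 * that whichever one turns out to be last can be re-emitted at the end with
 * last_args[2] = 1.
 */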

static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr;
	LLVMValueRef offset;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Coordinates */
	/* XXX: Not all sample instructions need 4 address arguments. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
		LLVMValueRef src_w;
		unsigned chan;
		LLVMValueRef coords[4];

		emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
		src_w = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);

		for (chan = 0; chan < 3; chan++) {
			LLVMValueRef arg = lp_build_emit_fetch(bld_base,
							       emit_data->inst, 0, chan);
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 arg, src_w);
		}
		coords[3] = bld_base->base.one;
		emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
							    coords, 4);
	} else
		emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
							 0, LP_CHAN_ALL);

	if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
	    inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
	    inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
		/* These instructions have an additional operand that should be
		 * packed into the cube coord vector by
		 * radeon_llvm_emit_prepare_cube_coords.  That operand should be
		 * passed as a float value in the args array right after the
		 * coord vector.  After packing it's not used anymore, which is
		 * why arg_count is not increased. */
		emit_data->args[2] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
	     inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) &&
	    inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, 1);
	}

	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, SI_SGPR_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm,
						  emit_data->inst->Texture.Texture);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);
}
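
/* Final argument layout handed to the sample intrinsics, as assembled above:
 * args[0] = write mask, args[1] = coordinate vector, args[2] = resource
 * descriptor (v8i32), args[3] = sampler state (v4i32), args[4] = TGSI
 * texture target.
 */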

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample"
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample.bias"
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample.lod"
};
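
/* All three actions share tex_fetch_args(); TXB and TXL only differ in which
 * intrinsic consumes the arguments, and TXP (mapped to tex_action in
 * si_pipe_shader_create() below) has its coordinates pre-divided by W in
 * tex_fetch_args().
 */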


int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader,
	struct si_shader_key key)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	if (shader_info.indirect_files != 0) {
		fprintf(stderr, "Indirect addressing not fully handled yet\n");
		return -ENOSYS;
	}

	shader->shader.uses_kill = shader_info.uses_kill;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_prologue = si_llvm_emit_prologue;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.key = key;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}

	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* copy new shader */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
					       inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count-12)/4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes+12 + i*4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}
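
/* Layout of the compiled binary consumed above, as the offsets imply:
 * bytes 0-3 hold num_sgprs, bytes 4-7 num_vgprs, bytes 8-11
 * spi_ps_input_ena, and the shader machine code starts at byte 12.
 */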

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}