radeonsi: Pass texture type to sampling intrinsics.
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>

struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	struct si_shader_key key;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8

enum sgpr_type {
	SGPR_CONST_PTR_F32,
	SGPR_CONST_PTR_V4I32,
	SGPR_CONST_PTR_V8I32,
	SGPR_I32,
	SGPR_I64
};

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is
 * the type pointed to by the base_ptr parameter (e.g. int is the element
 * of an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
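 * For example (illustrative only): with a float* base_ptr, an offset of 8
 * selects the 9th float, i.e. byte offset 32, while with a <4 x i32>*
 * base_ptr the same offset of 8 would address byte offset 128.
 *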
 */
static LLVMValueRef build_indexed_load(
	struct gallivm_state * gallivm,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		gallivm->builder, base_ptr, &offset, 1, "");

	return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
}

/**
 * Load a value stored in one of the user SGPRs
 *
 * @param sgpr This is the sgpr to load the value from. If you need to load a
 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
 * then you should pass the index of the first SGPR that holds the value. For
 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
 * pass 2 for the sgpr parameter.
 *
 * The value of the sgpr parameter must also be aligned to the width of the type
 * being loaded, so that the sgpr parameter is divisible by the dword width of the
 * type. For example, if the value being loaded is two dwords wide, then the sgpr
 * parameter must be divisible by two.
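 *
 * Illustrative calls, using SGPR indices that appear elsewhere in this file:
 *   use_sgpr(gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR) loads a single dword,
 *   while use_sgpr(gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER)
 *   loads a 64-bit pointer held in two consecutive SGPRs (hence the
 *   sgpr % 2 == 0 assertions below).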
 */
static LLVMValueRef use_sgpr(
	struct gallivm_state * gallivm,
	enum sgpr_type type,
	unsigned sgpr)
{
	LLVMValueRef sgpr_index;
	LLVMTypeRef ret_type;
	LLVMValueRef ptr;

	sgpr_index = lp_build_const_int32(gallivm, sgpr);

	switch (type) {
	case SGPR_CONST_PTR_F32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMFloatTypeInContext(gallivm->context);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_I32:
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		break;

	case SGPR_I64:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt64TypeInContext(gallivm->context);
		break;

	case SGPR_CONST_PTR_V4I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 4);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_CONST_PTR_V8I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 8);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	default:
		assert(!"Unsupported SGPR type in use_sgpr()");
		return NULL;
	}

	ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
	ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
	return LLVMBuildLoad(gallivm->builder, ptr, "");
}

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for vertex shaders */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
				LLVMBuildExtractElement(base->gallivm->builder,
				input, llvm_chan, "");
	}
}

static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *         quad begins a new primitive. Bit 0 always needs
	 *         to be unset)
	 * [31:16] ParamOffset
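	 *         (e.g. a value of 0x000a0000 would mean the interpolated
	 *         parameters start at offset 10 with no quad beginning a
	 *         new primitive; illustrative only)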
	 *
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, SI_PS_NUM_USER_SGPR);
	LLVMValueRef attr_number;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[1];
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = lp_build_const_int32(gallivm, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder,
					"llvm.SI.fs.read.pos", input_type,
					args, 1, LLVMReadNoneAttribute);
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.wqm",
				   LLVMVoidTypeInContext(gallivm->context),
				   NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		LLVMValueRef args[3];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = build_intrinsic(gallivm->builder,
				       "llvm.SI.fs.read.face",
				       input_type,
				       NULL, 0, LLVMReadNoneAttribute);
		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, 3, LLVMReadOnlyAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[3];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, 3, LLVMReadOnlyAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned idx;

	LLVMValueRef const_ptr;
	LLVMValueRef offset;
	LLVMValueRef load;

	/* currently not supported */
	if (reg->Register.Indirect) {
		assert(0);
		load = lp_build_const_int32(base->gallivm, 0);
		return bitcast(bld_base, type, load);
	}

	const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, SI_SGPR_CONST);

	/* XXX: This assumes that the constant buffer is not packed, so
	 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
	 * offset of 4. */
	idx = (reg->Register.Index * 4) + swizzle;

	/* index loads above 255 are currently not supported */
	if (idx > 255) {
		assert(0);
		idx = 0;
	}
	offset = lp_build_const_int32(base->gallivm, idx);

	load = build_indexed_load(base->gallivm, const_ptr, offset);
	return bitcast(bld_base, type, load);
}

/* Initialize arguments for the shader export intrinsic */
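/* The nine operands assembled below for llvm.SI.export are, in order:
 *   args[0]    component write mask
 *   args[1]    whether the EXEC mask represents the valid mask
 *   args[2]    whether this is the last export
 *   args[3]    export target
 *   args[4]    COMPR flag (export packed 16-bit values)
 *   args[5..8] the four output values
 */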
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}

static void si_llvm_emit_prologue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	lp_build_intrinsic_unary(gallivm->builder,
				 "llvm.AMDGPU.shader.type",
				 LLVMVoidTypeInContext(gallivm->context),
				 lp_build_const_int32(gallivm, si_shader_ctx->type));
}


static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->key.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
/*	ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}

static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr;
	LLVMValueRef offset;

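	/* The sampling intrinsics emitted from here take five operands:
	 * args[0] writemask, args[1] coordinates, args[2] resource,
	 * args[3] sampler and args[4] the TGSI texture type, which this
	 * change adds so the backend knows what kind of texture is being
	 * sampled. */
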
	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Coordinates */
	/* XXX: Not all sample instructions need 4 address arguments. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
		LLVMValueRef src_w;
		unsigned chan;
		LLVMValueRef coords[4];

		emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
		src_w = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);

		for (chan = 0; chan < 3; chan++) {
			LLVMValueRef arg = lp_build_emit_fetch(bld_base,
							       emit_data->inst, 0, chan);
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 arg, src_w);
		}
		coords[3] = bld_base->base.one;
		emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
							    coords, 4);
	} else
		emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
							 0, LP_CHAN_ALL);

	if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
	    inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
	    inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
		/* These instructions have an additional operand that should be packed
		 * into the cube coord vector by radeon_llvm_emit_prepare_cube_coords.
		 * That operand should be passed as a float value in the args array
		 * right after the coord vector. After packing it's not used anymore,
		 * that's why arg_count is not increased */
		emit_data->args[2] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
	     inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) &&
	    inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, 1);
	}

	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, SI_SGPR_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, SI_SGPR_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm,
						  emit_data->inst->Texture.Texture);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample"
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample.bias"
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample.lod"
};


int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader,
	struct si_shader_key key)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	shader->shader.uses_kill = shader_info.uses_kill;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_prologue = si_llvm_emit_prologue;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.key = key;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}

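	/* The first three dwords of the compiled binary hold the SGPR count,
	 * the VGPR count and the SPI_PS_INPUT_ENA value; the shader code
	 * itself follows at byte offset 12, hence the -12 adjustments when
	 * the code is copied into the buffer object below. */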
	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* copy new shader */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
					       inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count - 12) / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes + 12 + i * 4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}