radeonsi: mark most intrinsics as readnone/nounwind
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_arit.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	struct si_shader_key key;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	LLVMValueRef const_md;
	/* struct list_head inputs; */
	/* unsigned * input_mappings *//* From TGSI to SI hw */
	/* struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}


#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8
/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of elements to
 * offset, not the number of bytes or dwords. An element is the type pointed
 * to by the base_ptr parameter (e.g. int is the element of an int* pointer).
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
 *
 */
static LLVMValueRef build_indexed_load(
	struct si_shader_context * si_shader_ctx,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;

	LLVMValueRef computed_ptr = LLVMBuildGEP(
		base->gallivm->builder, base_ptr, &offset, 1, "");

	LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
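	/* Metadata kind 1 is LLVM's fixed !tbaa kind; the node built in
	 * create_meta_data tags this as a load from constant memory, so
	 * LLVM may freely reorder, hoist and CSE it. */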
	LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
	return result;
}

static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	//struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for Vertex Shaders */
	buffer_index_reg = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_INDEX);

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}

static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	LLVMValueRef interp_param;
	const char * intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad.  It is set if the
	 *                      quad begins a new primitive.  Bit 0 always needs
	 *                      to be unset)
	 * [31:16] ParamOffset
	 *
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		if (si_shader_ctx->key.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->key.color_two_side) {
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}

static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context * base = &bld_base->base;

	LLVMValueRef ptr;
	LLVMValueRef args[2];
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	/* Load the resource descriptor */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	args[0] = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);

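	/* Byte offset into the constant buffer: each TGSI constant register
	 * holds 4 dwords (16 bytes) and the swizzle selects one 4-byte dword
	 * within it. */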
	args[1] = lp_build_const_int32(base->gallivm, (reg->Register.Index * 4 + swizzle) * 4);
	if (reg->Register.Indirect) {
		const struct tgsi_ind_register *ireg = &reg->Indirect;
		LLVMValueRef addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
		LLVMValueRef idx = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		idx = lp_build_mul_imm(&bld_base->uint_bld, idx, 16);
		args[1] = lp_build_add(&bld_base->uint_bld, idx, args[1]);
	}

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

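	/* llvm.SI.export takes 9 arguments: component enable mask, valid-mask
	 * flag, done flag, export target, COMPR flag and four data values. */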
	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			compressed = (si_shader_ctx->key.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}

static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->key.alpha_func != PIPE_FUNC_NEVER) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->key.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->key.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

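		/* llvm.AMDGPU.kill discards the fragment when its argument is
		 * negative; llvm.AMDGPU.kilp discards it unconditionally. */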
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
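			/* Note the case label placed inside the if branch
			 * below: for vertex shaders, BCOLOR takes the same
			 * parameter-export path as COLOR. */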
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->key.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

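			/* Position (VS) and color (FS) exports are deferred:
			 * the final export of that kind must carry the done
			 * bit, which is only set at the end of this function. */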
			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	if (shader->fs_write_all && shader->nr_cbufs > 1) {
		int i;

		/* Specify that this is not yet the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 0);

		for (i = 1; i < shader->nr_cbufs; i++) {
			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm,
							    V_008DFC_SQ_EXP_MRT + i);

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   last_args, 9);

			si_shader_ctx->shader->spi_shader_col_format |=
				si_shader_ctx->shader->spi_shader_col_format << 4;
		}

		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
	}

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
	/* ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}

static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef ptr;
	LLVMValueRef offset;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	unsigned count = 0;
	unsigned chan;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if ((target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE) &&
	    opcode != TGSI_OPCODE_TXQ)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
		address[count++] = coords[2];
		break;
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[3];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	switch (target) {
	case TGSI_TEXTURE_2D:
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_RECT:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_2D_MSAA:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[2];
	}

	/* Pack array slice */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	for (chan = 0; chan < count; chan++) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[1] = lp_build_gather_values(gallivm, address, count);

	/* Resource */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(si_shader_ctx,
						ptr, offset);

	/* Sampler */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
	offset = lp_build_const_int32(bld_base->base.gallivm,
				      emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(si_shader_ctx,
						ptr, offset);

	/* Dimensions */
	emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm, target);

	emit_data->arg_count = 5;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
		LLVMFloatTypeInContext(bld_base->base.gallivm->context),
		4);
}

static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
				struct lp_build_tgsi_context * bld_base,
				struct lp_build_emit_data * emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	char intr_name[23];

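	/* Append the coordinate vector type to the base name, giving e.g.
	 * "llvm.SI.sample.v4i32" for a four-component address vector. */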
	sprintf(intr_name, "%sv%ui32", action->intr_name,
		LLVMGetVectorSize(LLVMTypeOf(emit_data->args[1])));

	emit_data->output[emit_data->chan] = build_intrinsic(
		base->gallivm->builder, intr_name, emit_data->dst_type,
		emit_data->args, emit_data->arg_count,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
}

static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};

static void create_meta_data(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef args[3];

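	/* Build the !tbaa node { !"const", null, i32 1 }; the trailing 1
	 * marks tagged loads as reads of constant (read-only) memory. */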
	args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
	args[1] = 0;
	args[2] = lp_build_const_int32(gallivm, 1);

	si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
}

static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMTypeRef params[20], f32, i8, i32, v2i32, v3i32;
	unsigned i;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
	params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
	params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_SAMPLER];
		params[SI_PARAM_VERTEX_INDEX] = i32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 5);

	} else {
		params[SI_PARAM_PRIM_MASK] = i32;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 20);
	}

	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);
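	/* These leading parameters are uniform across a wave; inreg asks the
	 * backend to pass them in scalar registers (SGPRs). */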
	for (i = SI_PARAM_CONST; i <= SI_PARAM_VERTEX_BUFFER; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}
}

int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader,
	struct si_shader_key key)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;

	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	assert(shader->shader.noutput == 0);
	assert(shader->shader.ninterp == 0);
	assert(shader->shader.ninput == 0);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	shader->shader.uses_kill = shader_info.uses_kill;
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.key = key;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	create_meta_data(&si_shader_ctx);
	create_function(&si_shader_ctx);

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}

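	/* The first three dwords produced by radeon_llvm_compile hold the
	 * shader config (SGPR count, VGPR count, SPI_PS_INPUT_ENA); the
	 * machine code itself starts at byte offset 12. */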
	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* copy new shader */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
					       inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count - 12) / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes + 12 + i*4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}