radeonsi: Handle TGSI_SEMANTIC_FOG.
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Tom Stellard <thomas.stellard@amd.com>
 *      Michel Dänzer <michel.daenzer@amd.com>
 *      Christian König <christian.koenig@amd.com>
 */

#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_tgsi.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"

#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
/*
static ps_remap_inputs(
	struct tgsi_llvm_context * tl_ctx,
	unsigned tgsi_index,
	unsigned tgsi_chan)
{
 :
}

struct si_input
{
	struct list_head head;
	unsigned tgsi_index;
	unsigned tgsi_chan;
	unsigned order;
};
*/


struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct r600_context *rctx;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	unsigned ninput_emitted;
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};

static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	return (struct si_shader_context *)bld_base;
}
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8

enum sgpr_type {
	SGPR_CONST_PTR_F32,
	SGPR_CONST_PTR_V4I32,
	SGPR_CONST_PTR_V8I32,
	SGPR_I32,
	SGPR_I64
};
/**
 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad
 *
 * @param offset The offset parameter specifies the number of
 * elements to offset, not the number of bytes or dwords. An element is the
 * type pointed to by the base_ptr parameter (e.g. int is the element of
 * an int* pointer)
 *
 * When LLVM lowers the load instruction, it will convert the element offset
 * into a dword offset automatically.
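 *
 * For example (illustrative): with a float* base_ptr, an offset of 4 loads
 * the element at byte offset 16, i.e. dword 4.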
 *
 */
static LLVMValueRef build_indexed_load(
	struct gallivm_state * gallivm,
	LLVMValueRef base_ptr,
	LLVMValueRef offset)
{
	LLVMValueRef computed_ptr = LLVMBuildGEP(
		gallivm->builder, base_ptr, &offset, 1, "");

	return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
}
/**
 * Load a value stored in one of the user SGPRs
 *
 * @param sgpr This is the sgpr to load the value from. If you need to load a
 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
 * then you should pass the index of the first SGPR that holds the value. For
 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
 * pass 2 for the sgpr parameter.
 *
 * The sgpr parameter must also be aligned to the width of the type being
 * loaded, i.e. it must be divisible by the dword width of the type. For
 * example, if the value being loaded is two dwords wide, then the sgpr
 * parameter must be divisible by two.
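 *
 * For example, fetch_constant() below reads the constant-buffer pointer
 * held in SGPR[0:1] via use_sgpr(gallivm, SGPR_CONST_PTR_F32, 0).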
 */
static LLVMValueRef use_sgpr(
	struct gallivm_state * gallivm,
	enum sgpr_type type,
	unsigned sgpr)
{
	LLVMValueRef sgpr_index;
	LLVMTypeRef ret_type;
	LLVMValueRef ptr;

	sgpr_index = lp_build_const_int32(gallivm, sgpr);

	switch (type) {
	case SGPR_CONST_PTR_F32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMFloatTypeInContext(gallivm->context);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_I32:
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		break;

	case SGPR_I64:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt64TypeInContext(gallivm->context);
		break;

	case SGPR_CONST_PTR_V4I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 4);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	case SGPR_CONST_PTR_V8I32:
		assert(sgpr % 2 == 0);
		ret_type = LLVMInt32TypeInContext(gallivm->context);
		ret_type = LLVMVectorType(ret_type, 8);
		ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
		break;

	default:
		assert(!"Unsupported SGPR type in use_sgpr()");
		return NULL;
	}

	ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
	ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
	return LLVMBuildLoad(gallivm->builder, ptr, "");
}
static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct r600_context *rctx = si_shader_ctx->rctx;
	/*struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];*/
	unsigned chan;

	/* Load the T list */
	/* XXX: Communicate with the rest of the driver about which SGPR the T#
	 * list pointer is going to be stored in. Hard code to SGPR[6:7] for
	 * now */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, 6);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	/* Load the buffer index, which is always stored in VGPR0
	 * for vertex shaders */
	buffer_index_reg = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0,
		LLVMReadNoneAttribute);
	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
				input, llvm_chan, "");
	}
}
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct lp_build_context * base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad. It is set if the
	 *         quad begins a new primitive. Bit 0 always needs
	 *         to be unset)
	 * [31:16] ParamOffset
	 */
	/* XXX: This register number must be identical to the S_00B02C_USER_SGPR
	 * register field value
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, 6);

	/* XXX: Is this the input_index? */
	LLVMValueRef attr_number = lp_build_const_int32(gallivm, input_index);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* XXX: Flat shading hangs the GPU */
		if (si_shader_ctx->rctx->queued.named.rasterizer &&
		    si_shader_ctx->rctx->queued.named.rasterizer->flatshade) {
#if 0
			intr_name = "llvm.SI.fs.interp.constant";
#else
			intr_name = "llvm.SI.fs.interp.linear.center";
#endif
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		/* XXX: Flat shading hangs the GPU */
#if 0
		intr_name = "llvm.SI.fs.interp.constant";
		break;
#endif
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	if (!si_shader_ctx->ninput_emitted++) {
		/* Enable whole quad mode */
		lp_build_intrinsic(gallivm->builder,
			"llvm.SI.wqm",
			LLVMVoidTypeInContext(gallivm->context),
			NULL, 0);
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4)? */
	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef args[3];
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
		LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		si_shader_ctx->radeon_bld.inputs[soa_index] =
			build_intrinsic(base->gallivm->builder, intr_name,
				input_type, args, 3, LLVMReadOnlyAttribute);
	}
}

static void declare_input(
	struct radeon_llvm_context * radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context * si_shader_ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		declare_input_vs(si_shader_ctx, input_index, decl);
	} else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		declare_input_fs(si_shader_ctx, input_index, decl);
	} else {
		fprintf(stderr, "Warning: Unsupported shader type.\n");
	}
}
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned idx;

	LLVMValueRef const_ptr;
	LLVMValueRef offset;
	LLVMValueRef load;

	/* indirect addressing is currently not supported */
	if (reg->Register.Indirect) {
		assert(0);
		load = lp_build_const_int32(base->gallivm, 0);
		return bitcast(bld_base, type, load);
	}

	/* XXX: Assume the pointer to the constant buffer is being stored in
	 * SGPR[0:1] */
	const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, 0);

	/* XXX: This assumes that the constant buffer is not packed, so
	 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
	 * offset of 4. */
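	/* For example, CONST[2].y maps to idx = 2 * 4 + 1 = 9, i.e. the
	 * tenth dword in the buffer. */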
	idx = (reg->Register.Index * 4) + swizzle;

	/* index loads above 255 are currently not supported */
	if (idx > 255) {
		assert(0);
		idx = 0;
	}
	offset = lp_build_const_int32(base->gallivm, idx);

	load = build_indexed_load(base->gallivm, const_ptr, offset);
	return bitcast(bld_base, type, load);
}
/* Initialize arguments for the shader export intrinsic */
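/* As set up below, the nine llvm.SI.export arguments are:
 *   args[0]    component write mask
 *   args[1]    whether the EXEC mask represents the valid mask
 *   args[2]    whether this is the last export
 *   args[3]    the export target
 *   args[4]    the COMPR flag (pack outputs as 16-bit values)
 *   args[5..8] the four output values */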
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
	struct tgsi_full_declaration *d,
	unsigned index,
	unsigned target,
	LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			struct r600_context *rctx = si_shader_ctx->rctx;
			compressed = (rctx->export_16bpc >> cbuf) & 0x1;
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
					out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
					out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
					"llvm.SI.packf16",
					LLVMInt32TypeInContext(base->gallivm->context),
					args, 2,
					LLVMReadNoneAttribute);
			args[chan + 7] = args[chan + 5];
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
					out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}
	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g. bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}

/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		LLVMValueRef args[9];
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch (d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
			case TGSI_SEMANTIC_POSITION:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					color_count++;
				}
				break;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);
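			/* Hold back the latest position export (VS) or color
			 * export (PS); it is emitted at the end of this
			 * function with the "last export" bit set. */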
			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
						"llvm.SI.export",
						LLVMVoidTypeInContext(base->gallivm->context),
						last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
					"llvm.SI.export",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 9);
			}
		}
	}
	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5] = uint->zero;
		last_args[6] = uint->zero;
		last_args[7] = uint->zero;
		last_args[8] = uint->zero;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
		si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.export",
		LLVMVoidTypeInContext(base->gallivm->context),
		last_args, 9);

	/* XXX: Look up what this function does */
/*	ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef ptr;
	LLVMValueRef offset;

	/* WriteMask */
	/* XXX: should be optimized using emit_data->inst->Dst[0].Register.WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm, 0xf);

	/* Coordinates */
	/* XXX: Not all sample instructions need 4 address arguments. */
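	/* TXP is projective texturing: divide x, y and z by the w (q)
	 * coordinate before sampling. */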
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
		LLVMValueRef src_w;
		unsigned chan;
		LLVMValueRef coords[4];

		emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
		src_w = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);

		for (chan = 0; chan < 3; chan++) {
			LLVMValueRef arg = lp_build_emit_fetch(bld_base,
				emit_data->inst, 0, chan);
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
				TGSI_OPCODE_DIV,
				arg, src_w);
		}
		coords[3] = bld_base->base.one;
		emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
			coords, 4);
	} else
		emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
			0, LP_CHAN_ALL);
	/* Resource */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, 4);
	offset = lp_build_const_int32(bld_base->base.gallivm,
			emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
			ptr, offset);

	/* Sampler */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, 2);
	offset = lp_build_const_int32(bld_base->base.gallivm,
			emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
			ptr, offset);

	/* Dimensions */
	/* XXX: We might want to pass this information to the shader at some point. */
	/* emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm,
		emit_data->inst->Texture.Texture);
	*/

	emit_data->arg_count = 4;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
		LLVMFloatTypeInContext(bld_base->base.gallivm->context),
		4);
}
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample"
};
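
/* Both TEX and TXP are bound to this action below, so they share the same
 * llvm.SI.sample intrinsic; tex_fetch_args performs TXP's projective
 * divide beforehand. */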

int si_pipe_shader_create(
	struct pipe_context *ctx,
	struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct si_pipe_shader_selector *sel = shader->selector;
	struct si_shader_context si_shader_ctx;
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base;
	LLVMModuleRef mod;
	unsigned char * inst_bytes;
	unsigned inst_byte_count;
	unsigned i;
	uint32_t *ptr;
	bool dump;
	dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);

	memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
	radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
	bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;

	tgsi_scan_shader(sel->tokens, &shader_info);
	bld_base->info = &shader_info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
	bld_base->emit_epilogue = si_llvm_emit_epilogue;

	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;

	si_shader_ctx.radeon_bld.load_input = declare_input;
	si_shader_ctx.tokens = sel->tokens;
	tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
	si_shader_ctx.shader = shader;
	si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
	si_shader_ctx.rctx = rctx;

	shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (dump) {
		tgsi_dump(sel->tokens, 0);
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return -EINVAL;
	}

	radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);

	mod = bld_base->base.gallivm->module;
	if (dump) {
		LLVMDumpModule(mod);
	}
	radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", dump);
	if (dump) {
		fprintf(stderr, "SI CODE:\n");
		for (i = 0; i < inst_byte_count; i += 4) {
			fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
				inst_bytes[i + 2], inst_bytes[i + 1],
				inst_bytes[i]);
		}
	}
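	/* The compiled binary begins with a 12-byte header: dword 0 holds the
	 * SGPR count, dword 1 the VGPR count and dword 2 the SPI_PS_INPUT_ENA
	 * value; the shader code itself starts at byte 12. */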
	shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
	shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
	shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));

	radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
	tgsi_parse_free(&si_shader_ctx.parse);

	/* copy new shader */
	si_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(ctx->screen, PIPE_USAGE_IMMUTABLE,
		inst_byte_count - 12);
	if (shader->bo == NULL) {
		return -ENOMEM;
	}

	ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	if (0 /*R600_BIG_ENDIAN*/) {
		for (i = 0; i < (inst_byte_count - 12) / 4; ++i) {
			ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes + 12 + i*4));
		}
	} else {
		memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
	}
	rctx->ws->buffer_unmap(shader->bo->cs_buf);

	free(inst_bytes);

	return 0;
}

void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}