radeonsi: emit TA_BC_BASE_ADDR_HI for border color on CIK
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 /*
3 * Copyright 2012 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Tom Stellard <thomas.stellard@amd.com>
26 * Michel Dänzer <michel.daenzer@amd.com>
27 * Christian König <christian.koenig@amd.com>
28 */
29
30 #include "gallivm/lp_bld_tgsi_action.h"
31 #include "gallivm/lp_bld_const.h"
32 #include "gallivm/lp_bld_gather.h"
33 #include "gallivm/lp_bld_intr.h"
34 #include "gallivm/lp_bld_logic.h"
35 #include "gallivm/lp_bld_tgsi.h"
36 #include "gallivm/lp_bld_arit.h"
37 #include "radeon_llvm.h"
38 #include "radeon_llvm_emit.h"
39 #include "util/u_memory.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi/tgsi_dump.h"
45
46 #include "radeonsi_pipe.h"
47 #include "radeonsi_shader.h"
48 #include "si_state.h"
49 #include "sid.h"
50
51 #include <assert.h>
52 #include <errno.h>
53 #include <stdio.h>
54
/* Per-shader compilation context.
 *
 * Wraps radeon_llvm_context (which must remain the FIRST member so that
 * si_shader_context() can cast a lp_build_tgsi_context pointer back to
 * this struct) together with the TGSI parse state and cached LLVM values
 * used while translating one shader.
 */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;
	struct tgsi_parse_context parse;
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	LLVMValueRef const_md;        /* metadata node attached (kind id 1) to constant loads in build_indexed_load */
	LLVMValueRef const_resource;  /* resource descriptor used by llvm.SI.load.const in fetch_constant */
	LLVMValueRef *constants;      /* preloaded constant values, indexed by reg * 4 + channel */
	LLVMValueRef *resources;      /* texture resource descriptors, indexed by sampler slot */
	LLVMValueRef *samplers;       /* sampler state descriptors, indexed by sampler slot */
};
68
/* Recover the enclosing si_shader_context from a TGSI build context.
 * This is valid because radeon_bld (whose soa.bld_base is what bld_base
 * points at) is the first member of struct si_shader_context. */
static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	void *ctx = bld_base;

	return (struct si_shader_context *)ctx;
}
74
75
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4
/* Correctly spelled alias for the misspelled CENTROID_OFSET above; the
 * old name is kept so existing users keep compiling. */
#define CENTROID_OFFSET CENTROID_OFSET

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8
86
87 /**
88 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
89 *
90 * @param offset The offset parameter specifies the number of
91 * elements to offset, not the number of bytes or dwords. An element is the
92 * the type pointed to by the base_ptr parameter (e.g. int is the element of
93 * an int* pointer)
94 *
95 * When LLVM lowers the load instruction, it will convert the element offset
96 * into a dword offset automatically.
97 *
98 */
99 static LLVMValueRef build_indexed_load(
100 struct si_shader_context * si_shader_ctx,
101 LLVMValueRef base_ptr,
102 LLVMValueRef offset)
103 {
104 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
105
106 LLVMValueRef computed_ptr = LLVMBuildGEP(
107 base->gallivm->builder, base_ptr, &offset, 1, "");
108
109 LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
110 LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
111 return result;
112 }
113
114 static LLVMValueRef get_instance_index(
115 struct radeon_llvm_context * radeon_bld,
116 unsigned divisor)
117 {
118 struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
119
120 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_INSTANCE_ID);
121 result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
122 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
123
124 if (divisor > 1)
125 result = LLVMBuildUDiv(gallivm->builder, result,
126 lp_build_const_int32(gallivm, divisor), "");
127
128 return result;
129 }
130
131 static void declare_input_vs(
132 struct si_shader_context * si_shader_ctx,
133 unsigned input_index,
134 const struct tgsi_full_declaration *decl)
135 {
136 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
137 unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];
138
139 unsigned chan;
140
141 LLVMValueRef t_list_ptr;
142 LLVMValueRef t_offset;
143 LLVMValueRef t_list;
144 LLVMValueRef attribute_offset;
145 LLVMValueRef buffer_index;
146 LLVMValueRef args[3];
147 LLVMTypeRef vec4_type;
148 LLVMValueRef input;
149
150 /* Load the T list */
151 t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);
152
153 t_offset = lp_build_const_int32(base->gallivm, input_index);
154
155 t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);
156
157 /* Build the attribute offset */
158 attribute_offset = lp_build_const_int32(base->gallivm, 0);
159
160 if (divisor) {
161 /* Build index from instance ID, start instance and divisor */
162 si_shader_ctx->shader->shader.uses_instanceid = true;
163 buffer_index = get_instance_index(&si_shader_ctx->radeon_bld, divisor);
164 } else {
165 /* Load the buffer index, which is always stored in VGPR0
166 * for Vertex Shaders */
167 buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_ID);
168 }
169
170 vec4_type = LLVMVectorType(base->elem_type, 4);
171 args[0] = t_list;
172 args[1] = attribute_offset;
173 args[2] = buffer_index;
174 input = build_intrinsic(base->gallivm->builder,
175 "llvm.SI.vs.load.input", vec4_type, args, 3,
176 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
177
178 /* Break up the vec4 into individual components */
179 for (chan = 0; chan < 4; chan++) {
180 LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
181 /* XXX: Use a helper function for this. There is one in
182 * tgsi_llvm.c. */
183 si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
184 LLVMBuildExtractElement(base->gallivm->builder,
185 input, llvm_chan, "");
186 }
187 }
188
/**
 * Declare a fragment shader input.
 *
 * TGSI_SEMANTIC_POSITION and TGSI_SEMANTIC_FACE are read directly from
 * dedicated shader input registers.  Everything else goes through the
 * llvm.SI.fs.interp intrinsic (or llvm.SI.fs.constant for flat-shaded /
 * constant inputs), with optional two-sided color selection.
 */
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
				&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	/* Interpolation coefficients (barycentrics) to pass to the interp
	 * intrinsic, or NULL for constant interpolation. */
	LLVMValueRef interp_param;
	const char * intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad.  It is set if the
	 *                      quad begins a new primitive.  Bit 0 always
	 *                      needs to be unset)
	 * [31:16] ParamOffset
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	/* gl_FragCoord: the four channels arrive as separate float
	 * shader parameters; no interpolation intrinsic is needed. */
	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	/* gl_FrontFacing: map the FRONT_FACE input to 1.0 (front) or
	 * 0.0 (back) in the X channel; Y/Z are 0.0 and W is 1.0. */
	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	/* Assign this input the next interpolated-parameter slot. */
	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* Colors follow the flat-shade state: NULL interp_param
		 * selects the llvm.SI.fs.constant path below. */
		if (si_shader_ctx->shader->key.ps.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->shader->key.ps.color_two_side) {
		/* Two-sided lighting: interpolate both the front attribute
		 * and the back attribute (stored in the next parameter
		 * slot) and select per fragment based on FRONT_FACE. */
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			/* args[3] is NULL for the constant path, in which
			 * case only 3 arguments are passed. */
			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		/* The back color consumed an extra parameter slot. */
		shader->ninterp++;
	} else {
		/* Common path: interpolate each channel of the input. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
356
357 static void declare_input(
358 struct radeon_llvm_context * radeon_bld,
359 unsigned input_index,
360 const struct tgsi_full_declaration *decl)
361 {
362 struct si_shader_context * si_shader_ctx =
363 si_shader_context(&radeon_bld->soa.bld_base);
364 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
365 declare_input_vs(si_shader_ctx, input_index, decl);
366 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
367 declare_input_fs(si_shader_ctx, input_index, decl);
368 } else {
369 fprintf(stderr, "Warning: Unsupported shader type,\n");
370 }
371 }
372
373 static void declare_system_value(
374 struct radeon_llvm_context * radeon_bld,
375 unsigned index,
376 const struct tgsi_full_declaration *decl)
377 {
378
379 LLVMValueRef value = 0;
380
381 switch (decl->Semantic.Name) {
382 case TGSI_SEMANTIC_INSTANCEID:
383 value = get_instance_index(radeon_bld, 1);
384 break;
385
386 case TGSI_SEMANTIC_VERTEXID:
387 value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_VERTEX_ID);
388 break;
389
390 default:
391 assert(!"unknown system value");
392 return;
393 }
394
395 radeon_bld->system_values[index] = value;
396 }
397
/**
 * Fetch one channel (or all four) of a constant-file source register.
 *
 * Direct accesses are served from the values preloaded into
 * si_shader_ctx->constants; indirect accesses are lowered to the
 * llvm.SI.load.const buffer-load intrinsic with a computed byte offset.
 * The result is bitcast to the requested opcode type.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned idx;

	LLVMValueRef args[2];
	LLVMValueRef addr;
	LLVMValueRef result;

	/* All four channels requested: recurse per channel and gather
	 * the results into one vector. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	/* Flat dword index of the requested channel. */
	idx = reg->Register.Index * 4 + swizzle;
	if (!reg->Register.Indirect)
		return bitcast(bld_base, type, si_shader_ctx->constants[idx]);

	/* Indirect access: byte offset = idx * 4 + addr-register * 16
	 * (the address register counts vec4 slots, 16 bytes each). */
	args[0] = si_shader_ctx->const_resource;
	args[1] = lp_build_const_int32(base->gallivm, idx * 4);
	addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}
438
/* Initialize arguments for the shader export intrinsic.
 *
 * Fills args[0..8] for a later llvm.SI.export call:
 *   args[0] = component write mask, args[1] = valid-mask flag,
 *   args[2] = done (last export) flag, args[3] = export target,
 *   args[4] = COMPR flag, args[5..8] = the four output values.
 * For pixel-shader MRT exports this also records the color format and
 * write mask in the shader state, and packs the values to fp16 pairs
 * when the 16bpc export key bit is set for that color buffer.
 */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			/* One export_16bpc key bit per color buffer. */
			compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);

			si_shader_ctx->shader->cb_shader_mask |= 0xf << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export:
		 * pack (X,Y) and (Z,W) into two fp16 pairs.  args[0]/[1]
		 * are reused as scratch inputs to llvm.SI.packf16 here
		 * and overwritten with their final meaning below. */
		for (chan = 0; chan < 2; chan++ ) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			/* Duplicate the packed value into args[7]/args[8]
			 * as well; the export still takes four operands. */
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++ ) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
528
529 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
530 unsigned index)
531 {
532 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
533 struct gallivm_state *gallivm = bld_base->base.gallivm;
534
535 if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
536 LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
537 LLVMValueRef alpha_pass =
538 lp_build_cmp(&bld_base->base,
539 si_shader_ctx->shader->key.ps.alpha_func,
540 LLVMBuildLoad(gallivm->builder, out_ptr, ""),
541 lp_build_const_float(gallivm, si_shader_ctx->shader->key.ps.alpha_ref));
542 LLVMValueRef arg =
543 lp_build_select(&bld_base->base,
544 alpha_pass,
545 lp_build_const_float(gallivm, 1.0f),
546 lp_build_const_float(gallivm, -1.0f));
547
548 build_intrinsic(gallivm->builder,
549 "llvm.AMDGPU.kill",
550 LLVMVoidTypeInContext(gallivm->context),
551 &arg, 1, 0);
552 } else {
553 build_intrinsic(gallivm->builder,
554 "llvm.AMDGPU.kilp",
555 LLVMVoidTypeInContext(gallivm->context),
556 NULL, 0, 0);
557 }
558 }
559
/* Compute and export user clip distances from the CLIPVERTEX output.
 *
 * Loads the user clip planes from element 1 of the const resource
 * array, takes the dot product of the clip vertex with each plane, and
 * exports the eight resulting distances as two vec4 position exports
 * (POS+2 and POS+3).
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
				    unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef out_elts[4];
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);

	/* Load the four components of the clip vertex output. */
	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
		out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
	}

	/* Two exports of four clip distances each. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		/* Start the four distance accumulators at 0. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				args[0] = const_resource;
				/* Byte offset of plane (reg_index*4 + chan),
				 * component const_chan, 4 bytes per float. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = build_intrinsic(base->gallivm->builder,
							   "llvm.SI.load.const",
							   base->elem_type,
							   args, 2,
							   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Export all four components to POS + 2 + reg_index. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
		lp_build_intrinsic(base->gallivm->builder,
				   "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9);
	}
}
617
/* XXX: This is partially implemented for VS only at this point. It is not complete */
/*
 * Shader epilogue: walk the TGSI declarations again, record input/output
 * metadata, and emit one llvm.SI.export per output register.  One export
 * must be marked "done"; the candidate is buffered in last_args and its
 * done bit is set only at the very end.  Depth/stencil outputs are
 * deferred and combined into a single MRTZ export.
 */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
				&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	/* Arguments of the export that may become the final ("done") one. */
	LLVMValueRef last_args[9] = { 0 };
	unsigned semantic_name;
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
					&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			/* Record input metadata only; inputs are not exported. */
			i = shader->ninput++;
			assert(i < Elements(shader->input));
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			assert(i < Elements(shader->output));
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		semantic_name = d->Semantic.Name;
handle_semantic:
		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(semantic_name) {
			case TGSI_SEMANTIC_PSIZE:
				shader->vs_out_misc_write = 1;
				shader->vs_out_point_size = 1;
				target = V_008DFC_SQ_EXP_POS + 1;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					/* Fragment depth: emitted later in
					 * the combined MRTZ export. */
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				/* Also deferred to the MRTZ export. */
				stencil_index = index;
				continue;
			case TGSI_SEMANTIC_COLOR:
				/* NOTE: the BCOLOR case label below is
				 * intentionally placed INSIDE the vertex-
				 * shader branch, so BCOLOR always takes
				 * the PARAM-export path while COLOR takes
				 * it only for vertex shaders. */
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_CLIPDIST:
				shader->clip_dist_write |=
					d->Declaration.UsageMask << (d->Semantic.Index << 2);
				target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				si_llvm_emit_clipvertex(bld_base, index);
				shader->clip_dist_write = 0xFF;
				continue;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					semantic_name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			/* VS position / PS color exports are candidates for
			 * the final export: buffer this one in last_args and
			 * flush any previously buffered candidate. */
			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (semantic_name == TGSI_SEMANTIC_POSITION) :
			    (semantic_name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}

		/* CLIPDIST outputs are exported a second time as GENERIC
		 * parameters so the next stage can read them. */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	/* Combined depth/stencil (MRTZ) export. */
	if (depth_index >= 0 || stencil_index >= 0) {
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			/* Depth comes from the Z channel of the position output. */
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			/* Stencil comes from the Y channel of its output. */
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		/* Keep exactly one export buffered as the "done" candidate. */
		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	/* No export emitted at all: a fragment shader still needs one
	 * (null) export, so synthesize a dummy MRT export. */
	if (!last_args[0]) {
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5]= uint->zero;
		last_args[6]= uint->zero;
		last_args[7]= uint->zero;
		last_args[8]= uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
		si_shader_ctx->shader->cb_shader_mask |= S_02823C_OUTPUT0_ENABLE(0xf);
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* FS_COLOR0_WRITES_ALL_CBUFS: replicate the color export to every
	 * bound color buffer before emitting the final export. */
	if (shader->fs_write_all && shader->nr_cbufs > 1) {
		int i;

		/* Specify that this is not yet the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 0);

		for (i = 1; i < shader->nr_cbufs; i++) {
			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm,
							    V_008DFC_SQ_EXP_MRT + i);

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   last_args, 9);

			si_shader_ctx->shader->spi_shader_col_format |=
				si_shader_ctx->shader->spi_shader_col_format << 4;
			si_shader_ctx->shader->cb_shader_mask |=
				si_shader_ctx->shader->cb_shader_mask << 4;
		}

		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
	}

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
/*		ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
868
/**
 * Build the argument list for a texture sample/fetch intrinsic.
 *
 * Packs, in hardware order, the LOD bias (TXB), depth-comparison value
 * (shadow targets), the projected texture coordinates, the array slice,
 * and the explicit LOD (TXL/TXF) into one i32 vector (args[0]), then
 * appends the resource descriptor, sampler (except for TXF) and the
 * texture-target dimension code.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;
	unsigned chan;

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		/* TXP: divide each coordinate by W. */
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if (target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		assert(ref_pos >= 0);
		address[count++] = coords[ref_pos];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		/* Shadow cube arrays carry the reference value in the
		 * second source register. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack array slice (the slice channel depends on the target) */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The address vector is an i32 vector; bitcast every element. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[emit_data->inst->Src[1].Register.Index];

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset * off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			address[0] =
				lp_build_add(uint_bld, address[0],
					     bld->immediates[off->Index][off->SwizzleX]);
			if (num_coords > 1)
				address[1] =
					lp_build_add(uint_bld, address[1],
						     bld->immediates[off->Index][off->SwizzleY]);
			if (num_coords > 2)
				address[2] =
					lp_build_add(uint_bld, address[2],
						     bld->immediates[off->Index][off->SwizzleZ]);
		}

		/* TXF returns raw texels: i32 result, no sampler argument. */
		emit_data->dst_type = LLVMVectorType(
			LLVMInt32TypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 3;
	} else {
		/* Sampler */
		emit_data->args[2] = si_shader_ctx->samplers[emit_data->inst->Src[1].Register.Index];

		emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);

		emit_data->arg_count = 4;
	}

	/* Dimensions */
	emit_data->args[emit_data->arg_count - 1] =
		lp_build_const_int32(bld_base->base.gallivm, target);

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
}
1015
1016 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
1017 struct lp_build_tgsi_context * bld_base,
1018 struct lp_build_emit_data * emit_data)
1019 {
1020 struct lp_build_context * base = &bld_base->base;
1021 char intr_name[23];
1022
1023 sprintf(intr_name, "%sv%ui32", action->intr_name,
1024 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
1025
1026 emit_data->output[emit_data->chan] = build_intrinsic(
1027 base->gallivm->builder, intr_name, emit_data->dst_type,
1028 emit_data->args, emit_data->arg_count,
1029 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1030 }
1031
1032 static void txq_fetch_args(
1033 struct lp_build_tgsi_context * bld_base,
1034 struct lp_build_emit_data * emit_data)
1035 {
1036 struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
1037 const struct tgsi_full_instruction *inst = emit_data->inst;
1038
1039 /* Mip level */
1040 emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
1041
1042 /* Resource */
1043 emit_data->args[1] = si_shader_ctx->resources[inst->Src[1].Register.Index];
1044
1045 /* Dimensions */
1046 emit_data->args[2] = lp_build_const_int32(bld_base->base.gallivm,
1047 inst->Texture.Texture);
1048
1049 emit_data->arg_count = 3;
1050
1051 emit_data->dst_type = LLVMVectorType(
1052 LLVMInt32TypeInContext(bld_base->base.gallivm->context),
1053 4);
1054 }
1055
/* TEX/TXP: basic texture sampling with implicit derivatives. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};
1061
/* TXB: texture sampling with an LOD bias. */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};
1067
/* TXF: unfiltered texel fetch (no sampler state, integer coords). */
static const struct lp_build_tgsi_action txf_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.imageload."
};
1073
/* TXL: texture sampling with an explicit LOD. */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};
1079
/* TXQ: texture size query; emitted directly, no vector-size suffix. */
static const struct lp_build_tgsi_action txq_action = {
	.fetch_args = txq_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.SI.resinfo"
};
1085
1086 static void create_meta_data(struct si_shader_context *si_shader_ctx)
1087 {
1088 struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
1089 LLVMValueRef args[3];
1090
1091 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
1092 args[1] = 0;
1093 args[2] = lp_build_const_int32(gallivm, 1);
1094
1095 si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
1096 }
1097
/* Declare the LLVM function signature for the shader being compiled.
 *
 * The parameter list mirrors the hardware input layout: descriptor
 * pointers first (shared by both stages), then stage-specific inputs.
 * Scalar-register (SGPR) inputs are tagged with the "inreg" attribute
 * so the backend places them in SGPRs.
 */
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMTypeRef params[20], f32, i8, i32, v2i32, v3i32;
	unsigned i;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	/* Descriptor tables live in the constant address space:
	 * 16-byte (v16i8) entries for constants/samplers, 32-byte
	 * (v32i8) entries for resources. */
	params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
	params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
	params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_SAMPLER];
		params[SI_PARAM_START_INSTANCE] = i32;
		/* The DUMMY params appear to pad the VGPR inputs so
		 * VERTEX_ID/INSTANCE_ID land at fixed hardware slots —
		 * TODO confirm against the SI_PARAM_* enum definitions. */
		params[SI_PARAM_VERTEX_ID] = i32;
		params[SI_PARAM_DUMMY_0] = i32;
		params[SI_PARAM_DUMMY_1] = i32;
		params[SI_PARAM_INSTANCE_ID] = i32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 9);

	} else {
		/* Fragment shader: interpolation weights followed by
		 * per-pixel position/ancillary inputs. */
		params[SI_PARAM_PRIM_MASK] = i32;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 20);
	}

	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);
	/* Mark the leading descriptor-pointer params as SGPR inputs.
	 * NOTE(review): this loop runs up to SI_PARAM_VERTEX_BUFFER for
	 * both shader types — presumably the fragment-shader indices in
	 * that range are also SGPRs; confirm against SI_PARAM_* enums. */
	for (i = SI_PARAM_CONST; i <= SI_PARAM_VERTEX_BUFFER; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	/* START_INSTANCE is a user SGPR as well, but only exists for VS. */
	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					      SI_PARAM_START_INSTANCE);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}
}
1156
/* Load the constant-buffer descriptor and pre-load every constant the
 * shader may read as "llvm.SI.load.const" intrinsic calls.
 *
 * All num_const * 4 scalar components are fetched eagerly; LLVM's code
 * sinking is expected to move each load next to its use (or eliminate
 * it when unused).
 */
static void preload_constants(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info * info = bld_base->info;

	/* file_max is the highest constant register index; -1 when unused */
	unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;

	LLVMValueRef ptr;

	if (num_const == 0)
		return;

	/* Allocate space for the constant values (4 components each) */
	si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));

	/* Load the resource descriptor */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);

	/* Load the constants, we rely on the code sinking to do the rest */
	for (i = 0; i < num_const * 4; ++i) {
		/* i * 4: byte offset of the i-th 32-bit component */
		LLVMValueRef args[2] = {
			si_shader_ctx->const_resource,
			lp_build_const_int32(gallivm, i * 4)
		};
		si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
			bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
1187
1188 static void preload_samplers(struct si_shader_context *si_shader_ctx)
1189 {
1190 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1191 struct gallivm_state * gallivm = bld_base->base.gallivm;
1192 const struct tgsi_shader_info * info = bld_base->info;
1193
1194 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
1195
1196 LLVMValueRef res_ptr, samp_ptr;
1197 LLVMValueRef offset;
1198
1199 if (num_samplers == 0)
1200 return;
1201
1202 /* Allocate space for the values */
1203 si_shader_ctx->resources = CALLOC(num_samplers, sizeof(LLVMValueRef));
1204 si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));
1205
1206 res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
1207 samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
1208
1209 /* Load the resources and samplers, we rely on the code sinking to do the rest */
1210 for (i = 0; i < num_samplers; ++i) {
1211
1212 /* Resource */
1213 offset = lp_build_const_int32(gallivm, i);
1214 si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);
1215
1216 /* Sampler */
1217 offset = lp_build_const_int32(gallivm, i);
1218 si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);
1219 }
1220 }
1221
1222 int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
1223 LLVMModuleRef mod)
1224 {
1225 unsigned i;
1226 uint32_t *ptr;
1227 bool dump;
1228 struct radeon_llvm_binary binary;
1229
1230 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1231
1232 memset(&binary, 0, sizeof(binary));
1233 radeon_llvm_compile(mod, &binary,
1234 r600_get_llvm_processor_name(rctx->screen->family), dump);
1235 if (dump) {
1236 fprintf(stderr, "SI CODE:\n");
1237 for (i = 0; i < binary.code_size; i+=4 ) {
1238 fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
1239 binary.code[i + 2], binary.code[i + 1],
1240 binary.code[i]);
1241 }
1242 }
1243
1244 /* XXX: We may be able to emit some of these values directly rather than
1245 * extracting fields to be emitted later.
1246 */
1247 for (i = 0; i < binary.config_size; i+= 8) {
1248 unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
1249 unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
1250 switch (reg) {
1251 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
1252 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
1253 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
1254 case R_00B848_COMPUTE_PGM_RSRC1:
1255 shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
1256 shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
1257 break;
1258 case R_0286CC_SPI_PS_INPUT_ENA:
1259 shader->spi_ps_input_ena = value;
1260 break;
1261 default:
1262 fprintf(stderr, "Warning: Compiler emitted unknown "
1263 "config register: 0x%x\n", reg);
1264 break;
1265 }
1266 }
1267
1268 /* copy new shader */
1269 si_resource_reference(&shader->bo, NULL);
1270 shader->bo = si_resource_create_custom(rctx->context.screen, PIPE_USAGE_IMMUTABLE,
1271 binary.code_size);
1272 if (shader->bo == NULL) {
1273 return -ENOMEM;
1274 }
1275
1276 ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
1277 if (0 /*R600_BIG_ENDIAN*/) {
1278 for (i = 0; i < binary.code_size / 4; ++i) {
1279 ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
1280 }
1281 } else {
1282 memcpy(ptr, binary.code, binary.code_size);
1283 }
1284 rctx->ws->buffer_unmap(shader->bo->cs_buf);
1285
1286 free(binary.code);
1287 free(binary.config);
1288
1289 return 0;
1290 }
1291
1292 int si_pipe_shader_create(
1293 struct pipe_context *ctx,
1294 struct si_pipe_shader *shader)
1295 {
1296 struct r600_context *rctx = (struct r600_context*)ctx;
1297 struct si_pipe_shader_selector *sel = shader->selector;
1298 struct si_shader_context si_shader_ctx;
1299 struct tgsi_shader_info shader_info;
1300 struct lp_build_tgsi_context * bld_base;
1301 LLVMModuleRef mod;
1302 bool dump;
1303 int r = 0;
1304
1305 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1306
1307 assert(shader->shader.noutput == 0);
1308 assert(shader->shader.ninterp == 0);
1309 assert(shader->shader.ninput == 0);
1310
1311 memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
1312 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
1313 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
1314
1315 tgsi_scan_shader(sel->tokens, &shader_info);
1316 shader->shader.uses_kill = shader_info.uses_kill;
1317 shader->shader.uses_instanceid = shader_info.uses_instanceid;
1318 bld_base->info = &shader_info;
1319 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
1320 bld_base->emit_epilogue = si_llvm_emit_epilogue;
1321
1322 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
1323 bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
1324 bld_base->op_actions[TGSI_OPCODE_TXF] = txf_action;
1325 bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
1326 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
1327 bld_base->op_actions[TGSI_OPCODE_TXQ] = txq_action;
1328
1329 si_shader_ctx.radeon_bld.load_input = declare_input;
1330 si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
1331 si_shader_ctx.tokens = sel->tokens;
1332 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
1333 si_shader_ctx.shader = shader;
1334 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
1335
1336 create_meta_data(&si_shader_ctx);
1337 create_function(&si_shader_ctx);
1338 preload_constants(&si_shader_ctx);
1339 preload_samplers(&si_shader_ctx);
1340
1341 shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;
1342
1343 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
1344 * conversion fails. */
1345 if (dump) {
1346 tgsi_dump(sel->tokens, 0);
1347 }
1348
1349 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
1350 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
1351 FREE(si_shader_ctx.constants);
1352 FREE(si_shader_ctx.resources);
1353 FREE(si_shader_ctx.samplers);
1354 return -EINVAL;
1355 }
1356
1357 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
1358
1359 mod = bld_base->base.gallivm->module;
1360 r = si_compile_llvm(rctx, shader, mod);
1361
1362 radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
1363 tgsi_parse_free(&si_shader_ctx.parse);
1364
1365 FREE(si_shader_ctx.constants);
1366 FREE(si_shader_ctx.resources);
1367 FREE(si_shader_ctx.samplers);
1368
1369 return r;
1370 }
1371
/* Release the shader's GPU code buffer (drops the bo reference). */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}