radeonsi: Handle TGSI_SEMANTIC_CLIPVERTEX
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 /*
3 * Copyright 2012 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Tom Stellard <thomas.stellard@amd.com>
26 * Michel Dänzer <michel.daenzer@amd.com>
27 * Christian König <christian.koenig@amd.com>
28 */
29
30 #include "gallivm/lp_bld_tgsi_action.h"
31 #include "gallivm/lp_bld_const.h"
32 #include "gallivm/lp_bld_gather.h"
33 #include "gallivm/lp_bld_intr.h"
34 #include "gallivm/lp_bld_logic.h"
35 #include "gallivm/lp_bld_tgsi.h"
36 #include "gallivm/lp_bld_arit.h"
37 #include "radeon_llvm.h"
38 #include "radeon_llvm_emit.h"
39 #include "util/u_memory.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_dump.h"
44
45 #include "radeonsi_pipe.h"
46 #include "radeonsi_shader.h"
47 #include "si_state.h"
48 #include "sid.h"
49
50 #include <assert.h>
51 #include <errno.h>
52 #include <stdio.h>
53
/* Per-compilation state for translating one TGSI shader to LLVM IR.
 * radeon_bld must stay the first member so a lp_build_tgsi_context
 * pointer can be downcast to this struct (see si_shader_context()). */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld; /* must be first: enables downcast */
	struct tgsi_parse_context parse;       /* token stream parser state */
	struct tgsi_token * tokens;
	struct si_pipe_shader *shader;
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	LLVMValueRef const_md;       /* metadata node attached to constant loads */
	LLVMValueRef const_resource; /* descriptor for the constant buffer */
	LLVMValueRef *constants;     /* pre-fetched constant values, 4 per slot */
	LLVMValueRef *resources;     /* texture resource descriptors */
	LLVMValueRef *samplers;      /* sampler state descriptors */
};
67
/* Downcast the generic TGSI build context to the driver context.
 * Safe because radeon_bld (whose soa.bld_base is the lp_build_tgsi_context)
 * is the first member of struct si_shader_context. */
static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context *ctx = (struct si_shader_context *)bld_base;
	return ctx;
}
73
74
/* Base indices for the two interpolation-coefficient groups. */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

/* Offsets from the interpolation base for each sampling location. */
#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4 /* NOTE(review): sic — likely meant CENTROID_OFFSET; renaming needs all users updated */

#define USE_SGPR_MAX_SUFFIX_LEN 5
#define CONST_ADDR_SPACE 2      /* LLVM address space for constant memory */
#define USER_SGPR_ADDR_SPACE 8  /* LLVM address space for user SGPRs */
85
86 /**
87 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
88 *
89 * @param offset The offset parameter specifies the number of
90 * elements to offset, not the number of bytes or dwords. An element is the
91 * the type pointed to by the base_ptr parameter (e.g. int is the element of
92 * an int* pointer)
93 *
94 * When LLVM lowers the load instruction, it will convert the element offset
95 * into a dword offset automatically.
96 *
97 */
98 static LLVMValueRef build_indexed_load(
99 struct si_shader_context * si_shader_ctx,
100 LLVMValueRef base_ptr,
101 LLVMValueRef offset)
102 {
103 struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
104
105 LLVMValueRef computed_ptr = LLVMBuildGEP(
106 base->gallivm->builder, base_ptr, &offset, 1, "");
107
108 LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
109 LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
110 return result;
111 }
112
113 static LLVMValueRef get_instance_index(
114 struct radeon_llvm_context * radeon_bld,
115 unsigned divisor)
116 {
117 struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
118
119 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_INSTANCE_ID);
120 result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
121 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
122
123 if (divisor > 1)
124 result = LLVMBuildUDiv(gallivm->builder, result,
125 lp_build_const_int32(gallivm, divisor), "");
126
127 return result;
128 }
129
/* Declare one vertex shader input attribute: fetch the vec4 with the
 * llvm.SI.vs.load.input intrinsic and scatter its components into the
 * SoA input slots of the common radeon_llvm_context. */
static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	unsigned divisor = si_shader_ctx->shader->key.vs.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;

	/* Load the T list (vertex buffer descriptor for this attribute) */
	t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFER);

	t_offset = lp_build_const_int32(base->gallivm, input_index);

	t_list = build_indexed_load(si_shader_ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(base->gallivm, 0);

	if (divisor) {
		/* Instanced attribute: build index from instance ID, start
		 * instance and divisor */
		si_shader_ctx->shader->shader.uses_instanceid = true;
		buffer_index = get_instance_index(&si_shader_ctx->radeon_bld, divisor);
	} else {
		/* Load the buffer index, which is always stored in VGPR0
		 * for Vertex Shaders */
		buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_ID);
	}

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
						input, llvm_chan, "");
	}
}
187
/* Declare one fragment shader input.  POSITION and FACE come straight
 * from dedicated hardware parameters; everything else is interpolated
 * with llvm.SI.fs.interp (or llvm.SI.fs.constant for flat inputs),
 * with optional two-sided color selection between front and back
 * attribute slots. */
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader *shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;
	LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
	LLVMValueRef main_fn = si_shader_ctx->radeon_bld.main_fn;

	LLVMValueRef interp_param;
	const char * intr_name;

	/* This value is:
	 * [15:0]  NewPrimMask (Bit mask for each quad.  It is set if the
	 *                      quad begins a new primitive.  Bit 0 always
	 *                      needs to be unset)
	 * [32:16] ParamOffset
	 */
	LLVMValueRef params = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef attr_number;

	unsigned chan;

	if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
		/* gl_FragCoord: read the four position parameters directly. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			unsigned soa_index =
				radeon_llvm_reg_index_soa(input_index, chan);
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMGetParam(main_fn, SI_PARAM_POS_X_FLOAT + chan);

			if (chan == 3)
				/* RCP for fragcoord.w: hardware provides 1/W
				 * in the W slot, so invert it. */
				si_shader_ctx->radeon_bld.inputs[soa_index] =
					LLVMBuildFDiv(gallivm->builder,
						      lp_build_const_float(gallivm, 1.0f),
						      si_shader_ctx->radeon_bld.inputs[soa_index],
						      "");
		}
		return;
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
		/* gl_FrontFacing: map the front-face parameter to
		 * (1.0, 0.0, 0.0, 1.0) for front / (0.0, 0.0, 0.0, 1.0)
		 * for back. */
		LLVMValueRef face, is_face_positive;

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			LLVMBuildSelect(gallivm->builder,
					is_face_positive,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, 0.0f),
					"");
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			lp_build_const_float(gallivm, 0.0f);
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			lp_build_const_float(gallivm, 1.0f);

		return;
	}

	/* Every interpolated input consumes one parameter slot. */
	shader->input[input_index].param_offset = shader->ninterp++;
	attr_number = lp_build_const_int32(gallivm,
					   shader->input[input_index].param_offset);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* A null interp_param selects llvm.SI.fs.constant below
		 * (flat shading). */
		if (si_shader_ctx->shader->key.ps.flatshade) {
			interp_param = 0;
		} else {
			if (decl->Interp.Centroid)
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
			else
				interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		interp_param = 0;
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_LINEAR_CENTER);
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTROID);
		else
			interp_param = LLVMGetParam(main_fn, SI_PARAM_PERSP_CENTER);
		break;
	default:
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    si_shader_ctx->shader->key.ps.color_two_side) {
		/* Two-sided lighting: interpolate both the front color (at
		 * attr_number) and the back color (at attr_number + 1), then
		 * select per-channel based on the facing bit. */
		LLVMValueRef args[4];
		LLVMValueRef face, is_face_positive;
		LLVMValueRef back_attr_number =
			lp_build_const_int32(gallivm,
					     shader->input[input_index].param_offset + 1);

		face = LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE);

		is_face_positive = LLVMBuildFCmp(gallivm->builder,
						 LLVMRealUGT, face,
						 lp_build_const_float(gallivm, 0.0f),
						 "");

		args[2] = params;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			LLVMValueRef front, back;

			/* llvm.SI.fs.constant takes only 3 args (no
			 * interp_param), hence the args[3] ? 4 : 3. */
			args[0] = llvm_chan;
			args[1] = attr_number;
			front = build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = build_intrinsic(base->gallivm->builder, intr_name,
					       input_type, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			si_shader_ctx->radeon_bld.inputs[soa_index] =
				LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}

		/* The back color occupies an extra parameter slot. */
		shader->ninterp++;
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = params;
			args[3] = interp_param;
			si_shader_ctx->radeon_bld.inputs[soa_index] =
				build_intrinsic(base->gallivm->builder, intr_name,
						input_type, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
355
356 static void declare_input(
357 struct radeon_llvm_context * radeon_bld,
358 unsigned input_index,
359 const struct tgsi_full_declaration *decl)
360 {
361 struct si_shader_context * si_shader_ctx =
362 si_shader_context(&radeon_bld->soa.bld_base);
363 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
364 declare_input_vs(si_shader_ctx, input_index, decl);
365 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
366 declare_input_fs(si_shader_ctx, input_index, decl);
367 } else {
368 fprintf(stderr, "Warning: Unsupported shader type,\n");
369 }
370 }
371
372 static void declare_system_value(
373 struct radeon_llvm_context * radeon_bld,
374 unsigned index,
375 const struct tgsi_full_declaration *decl)
376 {
377
378 LLVMValueRef value = 0;
379
380 switch (decl->Semantic.Name) {
381 case TGSI_SEMANTIC_INSTANCEID:
382 value = get_instance_index(radeon_bld, 1);
383 break;
384
385 case TGSI_SEMANTIC_VERTEXID:
386 value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_VERTEX_ID);
387 break;
388
389 default:
390 assert(!"unknown system value");
391 return;
392 }
393
394 radeon_bld->system_values[index] = value;
395 }
396
/* Fetch one channel (or all four, for LP_CHAN_ALL) of a constant
 * register.  Directly-addressed constants come from the pre-fetched
 * constants[] array; indirectly-addressed ones go through the
 * llvm.SI.load.const intrinsic with a runtime byte offset. */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned idx;

	LLVMValueRef args[2];
	LLVMValueRef addr;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Recurse per channel and gather into a vec4. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	/* Flat index: 4 channels per constant slot. */
	idx = reg->Register.Index * 4 + swizzle;
	if (!reg->Register.Indirect)
		return bitcast(bld_base, type, si_shader_ctx->constants[idx]);

	/* Indirect: byte offset = (static index + addr-reg value * 4) * 4. */
	args[0] = si_shader_ctx->const_resource;
	args[1] = lp_build_const_int32(base->gallivm, idx * 4);
	addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	args[1] = lp_build_add(&bld_base->uint_bld, addr, args[1]);

	result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
				 args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	return bitcast(bld_base, type, result);
}
437
/* Initialize arguments for the shader export intrinsic.
 *
 * Fills args[0..8] for llvm.SI.export: write mask, valid-mask flag,
 * done flag, target, COMPR flag and the four data values.  For pixel
 * shader MRT targets the values may be packed to fp16 pairs when the
 * shader key requests 16bpc export. */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     struct tgsi_full_declaration *d,
				     unsigned index,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	unsigned compressed = 0;
	unsigned chan;

	if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		if (cbuf >= 0 && cbuf < 8) {
			/* Record the per-colorbuffer export format. */
			compressed = (si_shader_ctx->shader->key.ps.export_16bpc >> cbuf) & 0x1;

			if (compressed)
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_FP16_ABGR << (4 * cbuf);
			else
				si_shader_ctx->shader->spi_shader_col_format |=
					V_028714_SPI_SHADER_32_ABGR << (4 * cbuf);
		}
	}

	if (compressed) {
		/* Pixel shader needs to pack output values before export:
		 * two f32 channels per packf16 call (XY then ZW). */
		for (chan = 0; chan < 2; chan++ ) {
			LLVMValueRef *out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index];
			args[0] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan], "");
			args[1] = LLVMBuildLoad(base->gallivm->builder,
						out_ptr[2 * chan + 1], "");
			args[chan + 5] =
				build_intrinsic(base->gallivm->builder,
						"llvm.SI.packf16",
						LLVMInt32TypeInContext(base->gallivm->context),
						args, 2,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			/* Export wants float operands; duplicate into slots
			 * 7/8 as well (hardware ignores them when COMPR). */
			args[chan + 7] = args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 args[chan + 5],
						 LLVMFloatTypeInContext(base->gallivm->context),
						 "");
		}

		/* Set COMPR flag */
		args[4] = uint->one;
	} else {
		for (chan = 0; chan < 4; chan++ ) {
			LLVMValueRef out_ptr =
				si_shader_ctx->radeon_bld.soa.outputs[index][chan];
			/* +5 because the first output value will be
			 * the 6th argument to the intrinsic. */
			args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
						       out_ptr, "");
		}

		/* Clear COMPR flag */
		args[4] = uint->zero;
	}

	/* XXX: This controls which components of the output
	 * registers actually get exported. (e.g bit 0 means export
	 * X component, bit 1 means export Y component, etc.) I'm
	 * hard coding this to 0xf for now. In the future, we might
	 * want to do something else. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf);

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	/* XXX: We probably need to keep track of the output
	 * values, so we know what we are passing to the next
	 * stage. */
}
525
/* Emit the alpha test for the first color export.  For any compare
 * function other than NEVER, kill the pixel when the comparison of
 * output alpha against alpha_ref fails (llvm.AMDGPU.kill with a
 * negative argument kills); for NEVER, kill unconditionally via
 * llvm.AMDGPU.kilp. */
static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
		/* Channel 3 of the output register is alpha. */
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
		LLVMValueRef alpha_pass =
			lp_build_cmp(&bld_base->base,
				     si_shader_ctx->shader->key.ps.alpha_func,
				     LLVMBuildLoad(gallivm->builder, out_ptr, ""),
				     lp_build_const_float(gallivm, si_shader_ctx->shader->key.ps.alpha_ref));
		LLVMValueRef arg =
			lp_build_select(&bld_base->base,
					alpha_pass,
					lp_build_const_float(gallivm, 1.0f),
					lp_build_const_float(gallivm, -1.0f));

		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kill",
				LLVMVoidTypeInContext(gallivm->context),
				&arg, 1, 0);
	} else {
		build_intrinsic(gallivm->builder,
				"llvm.AMDGPU.kilp",
				LLVMVoidTypeInContext(gallivm->context),
				NULL, 0, 0);
	}
}
556
/* Emit clip distance exports computed from a CLIPVERTEX output.
 * Dot the clip-vertex position with the 8 user clip plane vectors
 * (fetched from the second constant buffer via llvm.SI.load.const)
 * and export the results as two POS exports (POS+2 and POS+3, four
 * distances each). */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
				    unsigned index)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef out_elts[4];
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	/* Descriptor at slot 1 holds the clip plane constants. */
	LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);

	/* Load the four components of the clip vertex. */
	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
		out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
	}

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				args[0] = const_resource;
				/* Byte offset of plane (reg_index*4+chan),
				 * component const_chan. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = build_intrinsic(base->gallivm->builder,
							   "llvm.SI.load.const",
							   base->elem_type,
							   args, 2,
							   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Export all four distances, not the last export, to the
		 * position targets following POS (0..1 already used). */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
		lp_build_intrinsic(base->gallivm->builder,
				   "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9);
	}
}
614
/* XXX: This is partially implemented for VS only at this point. It is not complete */
/* Shader epilogue: walk the TGSI declarations, record inputs/outputs in
 * the si_shader, and emit llvm.SI.export calls for each output.  The
 * "main" export (POSITION for VS, COLOR for FS) is buffered in
 * last_args so the final export can carry the done bit. */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct si_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef args[9];
	LLVMValueRef last_args[9] = { 0 };
	unsigned color_count = 0;
	unsigned param_count = 0;
	int depth_index = -1, stencil_index = -1;

	while (!tgsi_parse_end_of_tokens(parse)) {
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		unsigned target;
		unsigned index;
		int i;

		tgsi_parse_token(parse);

		if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_PROPERTY &&
		    parse->FullToken.FullProperty.Property.PropertyName ==
		    TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS)
			shader->fs_write_all = TRUE;

		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			/* Record input metadata; no export needed. */
			i = shader->ninput++;
			assert(i < Elements(shader->input));
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			continue;

		case TGSI_FILE_OUTPUT:
			/* Record output metadata, then fall through to
			 * emit its export(s) below. */
			i = shader->noutput++;
			assert(i < Elements(shader->output));
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;

		default:
			continue;
		}

		for (index = d->Range.First; index <= d->Range.Last; index++) {
			/* Select the correct target */
			switch(d->Semantic.Name) {
			case TGSI_SEMANTIC_PSIZE:
				shader->vs_out_misc_write = 1;
				shader->vs_out_point_size = 1;
				target = V_008DFC_SQ_EXP_POS + 1;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_POS;
					break;
				} else {
					/* FS position output = gl_FragDepth:
					 * handled by the MRTZ export below. */
					depth_index = index;
					continue;
				}
			case TGSI_SEMANTIC_STENCIL:
				stencil_index = index;
				continue;
			/* NOTE: the BCOLOR case label is intentionally placed
			 * inside the if-branch (Duff's-device style): BCOLOR
			 * jumps straight into the VS param path, while COLOR
			 * chooses between the VS param path and the FS MRT
			 * path at runtime. */
			case TGSI_SEMANTIC_COLOR:
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
			case TGSI_SEMANTIC_BCOLOR:
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					if (color_count == 0 &&
					    si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
						si_alpha_test(bld_base, index);

					color_count++;
				}
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				si_llvm_emit_clipvertex(bld_base, index);
				shader->clip_dist_write = 0xFF;
				continue;
			case TGSI_SEMANTIC_FOG:
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			si_llvm_init_export_args(bld_base, d, index, target, args);

			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				/* Buffer this export: it may become the last
				 * one, which needs the done bit set.  Flush
				 * any previously-buffered export first. */
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
							   "llvm.SI.export",
							   LLVMVoidTypeInContext(base->gallivm->context),
							   last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
						   "llvm.SI.export",
						   LLVMVoidTypeInContext(base->gallivm->context),
						   args, 9);
			}

		}
	}

	if (depth_index >= 0 || stencil_index >= 0) {
		/* Emit the depth/stencil (MRTZ) export.  Depth goes in
		 * args[5] (from the Z channel), stencil in args[6] (from the
		 * Y channel); missing values are filled from the other. */
		LLVMValueRef out_ptr;
		unsigned mask = 0;

		/* Specify the target we are exporting */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

		if (depth_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[depth_index][2];
			args[5] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x1;

			if (stencil_index < 0) {
				args[6] =
				args[7] =
				args[8] = args[5];
			}
		}

		if (stencil_index >= 0) {
			out_ptr = si_shader_ctx->radeon_bld.soa.outputs[stencil_index][1];
			args[7] =
			args[8] =
			args[6] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
			mask |= 0x2;

			if (depth_index < 0)
				args[5] = args[6];
		}

		/* Specify which components to enable */
		args[0] = lp_build_const_int32(base->gallivm, mask);

		args[1] =
		args[2] =
		args[4] = uint->zero;

		if (last_args[0])
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   args, 9);
		else
			memcpy(last_args, args, sizeof(args));
	}

	if (!last_args[0]) {
		/* No buffered export at all: emit a null export so the
		 * hardware still sees a "done" export. */
		assert(si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

		/* Specify which components to enable */
		last_args[0] = lp_build_const_int32(base->gallivm, 0x0);

		/* Specify the target we are exporting */
		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);

		/* Set COMPR flag to zero to export data as 32-bit */
		last_args[4] = uint->zero;

		/* dummy bits */
		last_args[5]= uint->zero;
		last_args[6]= uint->zero;
		last_args[7]= uint->zero;
		last_args[8]= uint->zero;

		si_shader_ctx->shader->spi_shader_col_format |=
			V_028714_SPI_SHADER_32_ABGR;
	}

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
					    si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	if (shader->fs_write_all && shader->nr_cbufs > 1) {
		/* Replicate the color export to every bound color buffer. */
		int i;

		/* Specify that this is not yet the last export */
		last_args[2] = lp_build_const_int32(base->gallivm, 0);

		for (i = 1; i < shader->nr_cbufs; i++) {
			/* Specify the target we are exporting */
			last_args[3] = lp_build_const_int32(base->gallivm,
							    V_008DFC_SQ_EXP_MRT + i);

			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export",
					   LLVMVoidTypeInContext(base->gallivm->context),
					   last_args, 9);

			/* Duplicate cbuf0's format into the next slot. */
			si_shader_ctx->shader->spi_shader_col_format |=
				si_shader_ctx->shader->spi_shader_col_format << 4;
		}

		last_args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRT);
	}

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
			   "llvm.SI.export",
			   LLVMVoidTypeInContext(base->gallivm->context),
			   last_args, 9);

	/* XXX: Look up what this function does */
	/* ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
849
/* Assemble the argument list for the sample intrinsics: project (TXP),
 * pack the depth-compare value, coordinates, array slice and LOD into a
 * power-of-two i32 vector, then append resource, sampler and target. */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[4];
	LLVMValueRef address[16];
	unsigned count = 0;
	unsigned chan;

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];

	if ((target == TGSI_TEXTURE_CUBE || target == TGSI_TEXTURE_SHADOWCUBE) &&
	    opcode != TGSI_OPCODE_TXQ)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords);

	/* Pack depth comparison value */
	switch (target) {
	case TGSI_TEXTURE_SHADOW1D:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
		address[count++] = coords[2];
		break;
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[3];
		break;
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		/* Comparison value comes from the second source register. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, 0);
	}

	/* Pack texture coordinates; each switch adds the coordinates that
	 * the target's dimensionality requires. */
	address[count++] = coords[0];
	switch (target) {
	case TGSI_TEXTURE_2D:
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_RECT:
	case TGSI_TEXTURE_SHADOW2D:
	case TGSI_TEXTURE_SHADOWRECT:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_2D_MSAA:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_3D:
	case TGSI_TEXTURE_CUBE:
	case TGSI_TEXTURE_SHADOWCUBE:
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[2];
	}

	/* Pack array slice */
	switch (target) {
	case TGSI_TEXTURE_1D_ARRAY:
		address[count++] = coords[1];
	}
	switch (target) {
	case TGSI_TEXTURE_2D_ARRAY:
	case TGSI_TEXTURE_2D_ARRAY_MSAA:
	case TGSI_TEXTURE_SHADOW2D_ARRAY:
		address[count++] = coords[2];
	}
	switch (target) {
	case TGSI_TEXTURE_CUBE_ARRAY:
	case TGSI_TEXTURE_SHADOW1D_ARRAY:
	case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
		address[count++] = coords[3];
	}

	/* Pack LOD */
	if (opcode == TGSI_OPCODE_TXL)
		address[count++] = coords[3];

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The intrinsic takes an i32 vector; bitcast each component. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan],
						 LLVMInt32TypeInContext(gallivm->context),
						 "");
	}

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));

	emit_data->args[0] = lp_build_gather_values(gallivm, address, count);

	/* Resource */
	emit_data->args[1] = si_shader_ctx->resources[emit_data->inst->Src[1].Register.Index];

	/* Sampler */
	emit_data->args[2] = si_shader_ctx->samplers[emit_data->inst->Src[1].Register.Index];

	/* Dimensions */
	emit_data->args[3] = lp_build_const_int32(bld_base->base.gallivm, target);

	emit_data->arg_count = 4;

	emit_data->dst_type = LLVMVectorType(
		LLVMFloatTypeInContext(bld_base->base.gallivm->context),
		4);
}
986
987 static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
988 struct lp_build_tgsi_context * bld_base,
989 struct lp_build_emit_data * emit_data)
990 {
991 struct lp_build_context * base = &bld_base->base;
992 char intr_name[23];
993
994 sprintf(intr_name, "%sv%ui32", action->intr_name,
995 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
996
997 emit_data->output[emit_data->chan] = build_intrinsic(
998 base->gallivm->builder, intr_name, emit_data->dst_type,
999 emit_data->args, emit_data->arg_count,
1000 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1001 }
1002
/* TGSI_OPCODE_TEX (also reused for TXP): plain texture sample. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sample."
};

/* TGSI_OPCODE_TXB: texture sample with LOD bias. */
static const struct lp_build_tgsi_action txb_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.sampleb."
};

/* TGSI_OPCODE_TXL: texture sample with explicit LOD
 * (tex_fetch_args packs the LOD from coords[3] for TXL). */
static const struct lp_build_tgsi_action txl_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
	.intr_name = "llvm.SI.samplel."
};
1020
1021 static void create_meta_data(struct si_shader_context *si_shader_ctx)
1022 {
1023 struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
1024 LLVMValueRef args[3];
1025
1026 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
1027 args[1] = 0;
1028 args[2] = lp_build_const_int32(gallivm, 1);
1029
1030 si_shader_ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
1031 }
1032
/*
 * Declare the shader's LLVM function and its parameter list.
 *
 * The parameter layout is an ABI with the hardware: descriptor pointers
 * arrive in scalar registers, per-fragment/per-vertex data in vector
 * registers. The SI_PARAM_* indices (declared elsewhere) fix each
 * parameter's position; the layout differs between vertex and fragment
 * shaders.
 */
static void create_function(struct si_shader_context *si_shader_ctx)
{
	struct gallivm_state *gallivm = si_shader_ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMTypeRef params[20], f32, i8, i32, v2i32, v3i32;
	unsigned i;

	i8 = LLVMInt8TypeInContext(gallivm->context);
	i32 = LLVMInt32TypeInContext(gallivm->context);
	f32 = LLVMFloatTypeInContext(gallivm->context);
	v2i32 = LLVMVectorType(i32, 2);
	v3i32 = LLVMVectorType(i32, 3);

	/* Descriptor array pointers in the constant address space:
	 * 16-byte entries for constants/samplers, 32 bytes for resources. */
	params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
	params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
	params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_SAMPLER];
		params[SI_PARAM_START_INSTANCE] = i32;
		params[SI_PARAM_VERTEX_ID] = i32;
		params[SI_PARAM_DUMMY_0] = i32;
		params[SI_PARAM_DUMMY_1] = i32;
		params[SI_PARAM_INSTANCE_ID] = i32;
		/* NOTE(review): the count 9 must equal the number of
		 * SI_PARAM_* slots filled above (highest index + 1) —
		 * confirm against the SI_PARAM_* enum, which is declared
		 * outside this chunk. */
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 9);

	} else {
		/* Fragment shader: interpolation coefficients (ij pairs),
		 * then per-pixel position and ancillary inputs. */
		params[SI_PARAM_PRIM_MASK] = i32;
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = f32;
		params[SI_PARAM_POS_X_FLOAT] = f32;
		params[SI_PARAM_POS_Y_FLOAT] = f32;
		params[SI_PARAM_POS_Z_FLOAT] = f32;
		params[SI_PARAM_POS_W_FLOAT] = f32;
		params[SI_PARAM_FRONT_FACE] = f32;
		params[SI_PARAM_ANCILLARY] = f32;
		params[SI_PARAM_SAMPLE_COVERAGE] = f32;
		params[SI_PARAM_POS_FIXED_PT] = f32;
		radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 20);
	}

	radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);
	/* Mark the leading parameters "inreg" so they are passed in scalar
	 * registers. NOTE(review): this loop runs up to
	 * SI_PARAM_VERTEX_BUFFER for fragment shaders too — verify that
	 * index aliases a parameter that should be inreg in the FS layout. */
	for (i = SI_PARAM_CONST; i <= SI_PARAM_VERTEX_BUFFER; ++i) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
		LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
					      SI_PARAM_START_INSTANCE);
		LLVMAddAttribute(P, LLVMInRegAttribute);
	}
}
1091
/*
 * Preload every constant-buffer channel at the top of the shader.
 *
 * Loads the constant resource descriptor from SI_PARAM_CONST, then emits
 * one llvm.SI.load.const call per channel (4 channels per constant
 * register). Unused loads are expected to be cleaned up by LLVM's code
 * sinking, so emitting all of them up front is harmless.
 */
static void preload_constants(struct si_shader_context *si_shader_ctx)
{
	struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info * info = bld_base->info;

	/* file_max holds the highest used register index (-1 if none). */
	unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;

	LLVMValueRef ptr;

	if (num_const == 0)
		return;

	/* Allocate space for the constant values */
	/* NOTE(review): CALLOC result is not checked; an OOM here would
	 * crash on the store below. */
	si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));

	/* Load the resource descriptor */
	ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
	si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);

	/* Load the constants, we rely on the code sinking to do the rest */
	for (i = 0; i < num_const * 4; ++i) {
		/* i * 4: presumably the byte offset of channel i (4 bytes
		 * per channel) — confirm against llvm.SI.load.const. */
		LLVMValueRef args[2] = {
			si_shader_ctx->const_resource,
			lp_build_const_int32(gallivm, i * 4)
		};
		si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
			bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
1122
1123 static void preload_samplers(struct si_shader_context *si_shader_ctx)
1124 {
1125 struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
1126 struct gallivm_state * gallivm = bld_base->base.gallivm;
1127 const struct tgsi_shader_info * info = bld_base->info;
1128
1129 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
1130
1131 LLVMValueRef res_ptr, samp_ptr;
1132 LLVMValueRef offset;
1133
1134 if (num_samplers == 0)
1135 return;
1136
1137 /* Allocate space for the values */
1138 si_shader_ctx->resources = CALLOC(num_samplers, sizeof(LLVMValueRef));
1139 si_shader_ctx->samplers = CALLOC(num_samplers, sizeof(LLVMValueRef));
1140
1141 res_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_RESOURCE);
1142 samp_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_SAMPLER);
1143
1144 /* Load the resources and samplers, we rely on the code sinking to do the rest */
1145 for (i = 0; i < num_samplers; ++i) {
1146
1147 /* Resource */
1148 offset = lp_build_const_int32(gallivm, i);
1149 si_shader_ctx->resources[i] = build_indexed_load(si_shader_ctx, res_ptr, offset);
1150
1151 /* Sampler */
1152 offset = lp_build_const_int32(gallivm, i);
1153 si_shader_ctx->samplers[i] = build_indexed_load(si_shader_ctx, samp_ptr, offset);
1154 }
1155 }
1156
1157 int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
1158 LLVMModuleRef mod)
1159 {
1160 unsigned i;
1161 uint32_t *ptr;
1162 bool dump;
1163 struct radeon_llvm_binary binary;
1164
1165 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1166
1167 memset(&binary, 0, sizeof(binary));
1168 radeon_llvm_compile(mod, &binary,
1169 r600_get_llvm_processor_name(rctx->screen->family), dump);
1170 if (dump) {
1171 fprintf(stderr, "SI CODE:\n");
1172 for (i = 0; i < binary.code_size; i+=4 ) {
1173 fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
1174 binary.code[i + 2], binary.code[i + 1],
1175 binary.code[i]);
1176 }
1177 }
1178
1179 /* XXX: We may be able to emit some of these values directly rather than
1180 * extracting fields to be emitted later.
1181 */
1182 for (i = 0; i < binary.config_size; i+= 8) {
1183 unsigned reg = util_le32_to_cpu(*(uint32_t*)(binary.config + i));
1184 unsigned value = util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
1185 switch (reg) {
1186 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
1187 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
1188 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
1189 case R_00B848_COMPUTE_PGM_RSRC1:
1190 shader->num_sgprs = (G_00B028_SGPRS(value) + 1) * 8;
1191 shader->num_vgprs = (G_00B028_VGPRS(value) + 1) * 4;
1192 break;
1193 case R_0286CC_SPI_PS_INPUT_ENA:
1194 shader->spi_ps_input_ena = value;
1195 break;
1196 default:
1197 fprintf(stderr, "Warning: Compiler emitted unknown "
1198 "config register: 0x%x\n", reg);
1199 break;
1200 }
1201 }
1202
1203 /* copy new shader */
1204 si_resource_reference(&shader->bo, NULL);
1205 shader->bo = si_resource_create_custom(rctx->context.screen, PIPE_USAGE_IMMUTABLE,
1206 binary.code_size);
1207 if (shader->bo == NULL) {
1208 return -ENOMEM;
1209 }
1210
1211 ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
1212 if (0 /*R600_BIG_ENDIAN*/) {
1213 for (i = 0; i < binary.code_size / 4; ++i) {
1214 ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
1215 }
1216 } else {
1217 memcpy(ptr, binary.code, binary.code_size);
1218 }
1219 rctx->ws->buffer_unmap(shader->bo->cs_buf);
1220
1221 free(binary.code);
1222 free(binary.config);
1223
1224 return 0;
1225 }
1226
1227 int si_pipe_shader_create(
1228 struct pipe_context *ctx,
1229 struct si_pipe_shader *shader)
1230 {
1231 struct r600_context *rctx = (struct r600_context*)ctx;
1232 struct si_pipe_shader_selector *sel = shader->selector;
1233 struct si_shader_context si_shader_ctx;
1234 struct tgsi_shader_info shader_info;
1235 struct lp_build_tgsi_context * bld_base;
1236 LLVMModuleRef mod;
1237 bool dump;
1238 int r = 0;
1239
1240 dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
1241
1242 assert(shader->shader.noutput == 0);
1243 assert(shader->shader.ninterp == 0);
1244 assert(shader->shader.ninput == 0);
1245
1246 memset(&si_shader_ctx, 0, sizeof(si_shader_ctx));
1247 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
1248 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
1249
1250 tgsi_scan_shader(sel->tokens, &shader_info);
1251 shader->shader.uses_kill = shader_info.uses_kill;
1252 shader->shader.uses_instanceid = shader_info.uses_instanceid;
1253 bld_base->info = &shader_info;
1254 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
1255 bld_base->emit_epilogue = si_llvm_emit_epilogue;
1256
1257 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
1258 bld_base->op_actions[TGSI_OPCODE_TXB] = txb_action;
1259 bld_base->op_actions[TGSI_OPCODE_TXL] = txl_action;
1260 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
1261
1262 si_shader_ctx.radeon_bld.load_input = declare_input;
1263 si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
1264 si_shader_ctx.tokens = sel->tokens;
1265 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
1266 si_shader_ctx.shader = shader;
1267 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
1268
1269 create_meta_data(&si_shader_ctx);
1270 create_function(&si_shader_ctx);
1271 preload_constants(&si_shader_ctx);
1272 preload_samplers(&si_shader_ctx);
1273
1274 shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;
1275
1276 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
1277 * conversion fails. */
1278 if (dump) {
1279 tgsi_dump(sel->tokens, 0);
1280 }
1281
1282 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
1283 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
1284 FREE(si_shader_ctx.constants);
1285 FREE(si_shader_ctx.resources);
1286 FREE(si_shader_ctx.samplers);
1287 return -EINVAL;
1288 }
1289
1290 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
1291
1292 mod = bld_base->base.gallivm->module;
1293 r = si_compile_llvm(rctx, shader, mod);
1294
1295 radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
1296 tgsi_parse_free(&si_shader_ctx.parse);
1297
1298 FREE(si_shader_ctx.constants);
1299 FREE(si_shader_ctx.resources);
1300 FREE(si_shader_ctx.samplers);
1301
1302 return r;
1303 }
1304
/* Release the buffer object holding the compiled shader code
 * (si_resource_reference with NULL drops the reference). */
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	si_resource_reference(&shader->bo, NULL);
}