r600g: make tgsi-to-llvm generate store.pixel* intrinsics for fs
[mesa.git] / src / gallium / drivers / r600 / r600_llvm.c
1 #include "r600_llvm.h"
2
3 #include "gallivm/lp_bld_const.h"
4 #include "gallivm/lp_bld_intr.h"
5 #include "gallivm/lp_bld_gather.h"
6 #include "tgsi/tgsi_parse.h"
7 #include "util/u_double_list.h"
8 #include "util/u_memory.h"
9
10 #include "r600.h"
11 #include "r600_asm.h"
12 #include "r600_opcodes.h"
13 #include "r600_shader.h"
14 #include "radeon_llvm.h"
15 #include "radeon_llvm_emit.h"
16
17 #include <stdio.h>
18
19 #if defined R600_USE_LLVM || defined HAVE_OPENCL
20
21 static LLVMValueRef llvm_fetch_const(
22 struct lp_build_tgsi_context * bld_base,
23 const struct tgsi_full_src_register *reg,
24 enum tgsi_opcode_type type,
25 unsigned swizzle)
26 {
27 LLVMValueRef idx = lp_build_const_int32(bld_base->base.gallivm,
28 radeon_llvm_reg_index_soa(reg->Register.Index, swizzle));
29 LLVMValueRef cval = build_intrinsic(bld_base->base.gallivm->builder,
30 "llvm.AMDGPU.load.const", bld_base->base.elem_type,
31 &idx, 1, LLVMReadNoneAttribute);
32
33 return bitcast(bld_base, type, cval);
34 }
35
36 static void llvm_load_system_value(
37 struct radeon_llvm_context * ctx,
38 unsigned index,
39 const struct tgsi_full_declaration *decl)
40 {
41 unsigned chan;
42
43 switch (decl->Semantic.Name) {
44 case TGSI_SEMANTIC_INSTANCEID: chan = 3; break;
45 case TGSI_SEMANTIC_VERTEXID: chan = 0; break;
46 default: assert(!"unknown system value");
47 }
48
49 LLVMValueRef reg = lp_build_const_int32(
50 ctx->soa.bld_base.base.gallivm, chan);
51 ctx->system_values[index] = build_intrinsic(
52 ctx->soa.bld_base.base.gallivm->builder,
53 "llvm.R600.load.input",
54 ctx->soa.bld_base.base.elem_type, &reg, 1,
55 LLVMReadNoneAttribute);
56 }
57
58 static LLVMValueRef llvm_fetch_system_value(
59 struct lp_build_tgsi_context * bld_base,
60 const struct tgsi_full_src_register *reg,
61 enum tgsi_opcode_type type,
62 unsigned swizzle)
63 {
64 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
65 LLVMValueRef cval = ctx->system_values[reg->Register.Index];
66 return bitcast(bld_base, type, cval);
67 }
68
69 static LLVMValueRef
70 llvm_load_input_helper(
71 struct radeon_llvm_context * ctx,
72 const char *intrinsic, unsigned idx)
73 {
74 LLVMValueRef reg = lp_build_const_int32(
75 ctx->soa.bld_base.base.gallivm,
76 idx);
77 return build_intrinsic(
78 ctx->soa.bld_base.base.gallivm->builder,
79 intrinsic,
80 ctx->soa.bld_base.base.elem_type, &reg, 1,
81 LLVMReadNoneAttribute);
82 }
83
84 static LLVMValueRef
85 llvm_face_select_helper(
86 struct radeon_llvm_context * ctx,
87 const char *intrinsic, unsigned face_register,
88 unsigned frontcolor_register, unsigned backcolor_regiser)
89 {
90
91 LLVMValueRef backcolor = llvm_load_input_helper(
92 ctx,
93 intrinsic,
94 backcolor_regiser);
95 LLVMValueRef front_color = llvm_load_input_helper(
96 ctx,
97 intrinsic,
98 frontcolor_register);
99 LLVMValueRef face = llvm_load_input_helper(
100 ctx,
101 "llvm.R600.load.input",
102 face_register);
103 LLVMValueRef is_face_positive = LLVMBuildFCmp(
104 ctx->soa.bld_base.base.gallivm->builder,
105 LLVMRealUGT, face,
106 lp_build_const_float(ctx->soa.bld_base.base.gallivm, 0.0f),
107 "");
108 return LLVMBuildSelect(
109 ctx->soa.bld_base.base.gallivm->builder,
110 is_face_positive,
111 front_color,
112 backcolor,
113 "");
114 }
115
/*
 * Load one declared shader input into ctx->inputs[] (SoA layout, four
 * channels per register).  For Evergreen+ fragment shaders the load
 * intrinsic is chosen from the declared interpolation mode; otherwise the
 * generic llvm.R600.load.input is used with an offset past the reserved
 * registers.
 */
static void llvm_load_input(
	struct radeon_llvm_context * ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	unsigned chan;

	const char *intrinsics = "llvm.R600.load.input";
	unsigned offset = 4 * ctx->reserved_reg_count;

	/* Evergreen+ fragment shaders interpolate in the shader itself, so
	 * pick the interpolation-specific intrinsic and drop the reserved-
	 * register offset (interpolated inputs use their own addressing). */
	if (ctx->type == TGSI_PROCESSOR_FRAGMENT && ctx->chip_class >= EVERGREEN) {
		switch (decl->Interp.Interpolate) {
		case TGSI_INTERPOLATE_COLOR:
		case TGSI_INTERPOLATE_PERSPECTIVE:
			offset = 0;
			intrinsics = "llvm.R600.load.input.perspective";
			break;
		case TGSI_INTERPOLATE_LINEAR:
			offset = 0;
			intrinsics = "llvm.R600.load.input.linear";
			break;
		case TGSI_INTERPOLATE_CONSTANT:
			offset = 0;
			intrinsics = "llvm.R600.load.input.constant";
			break;
		default:
			assert(0 && "Unknow Interpolate mode");
		}
	}

	for (chan = 0; chan < 4; chan++) {
		unsigned soa_index = radeon_llvm_reg_index_soa(input_index,
			chan);

		switch (decl->Semantic.Name) {
		case TGSI_SEMANTIC_FACE:
			/* The face bit lives in a dedicated input register. */
			ctx->inputs[soa_index] = llvm_load_input_helper(ctx,
				"llvm.R600.load.input",
				4 * ctx->face_input);
			break;
		case TGSI_SEMANTIC_POSITION:
			if (ctx->type != TGSI_PROCESSOR_FRAGMENT || chan != 3) {
				ctx->inputs[soa_index] = llvm_load_input_helper(ctx,
					"llvm.R600.load.input",
					soa_index + (ctx->reserved_reg_count * 4));
			} else {
				/* Fragment position.w: the hardware delivers w;
				 * TGSI expects 1/w, so emit the reciprocal. */
				LLVMValueRef w_coord = llvm_load_input_helper(ctx,
					"llvm.R600.load.input",
					soa_index + (ctx->reserved_reg_count * 4));
				ctx->inputs[soa_index] = LLVMBuildFDiv(ctx->gallivm.builder,
					lp_build_const_float(&(ctx->gallivm), 1.0f), w_coord, "");
			}
			break;
		case TGSI_SEMANTIC_COLOR:
			/* Two-sided lighting: select between front and back
			 * color based on the face register.  Without two_side
			 * this deliberately falls through to the default load. */
			if (ctx->two_side) {
				unsigned front_location, back_location;
				unsigned back_reg = ctx->r600_inputs[input_index]
					.potential_back_facing_reg;
				if (ctx->chip_class >= EVERGREEN) {
					front_location = 4 * ctx->r600_inputs[input_index].lds_pos + chan;
					back_location = 4 * ctx->r600_inputs[back_reg].lds_pos + chan;
				} else {
					front_location = soa_index + 4 * ctx->reserved_reg_count;
					back_location = radeon_llvm_reg_index_soa(
						ctx->r600_inputs[back_reg].gpr,
						chan);
				}
				ctx->inputs[soa_index] = llvm_face_select_helper(ctx,
					intrinsics,
					4 * ctx->face_input, front_location, back_location);
				break;
			}
			/* fallthrough */
		default:
		{
			unsigned location;
			/* Evergreen+ addresses inputs by LDS position; older
			 * chips address them by GPR past the reserved regs. */
			if (ctx->chip_class >= EVERGREEN) {
				location = 4 * ctx->r600_inputs[input_index].lds_pos + chan;
			} else {
				location = soa_index + 4 * ctx->reserved_reg_count;
			}
			/* The * 4 is assuming that we are in soa mode. */
			ctx->inputs[soa_index] = llvm_load_input_helper(ctx,
				intrinsics, location);

			break;
		}
		}
	}
}
205
206 static void llvm_emit_prologue(struct lp_build_tgsi_context * bld_base)
207 {
208 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
209 struct lp_build_context * base = &bld_base->base;
210 unsigned i;
211
212 /* Reserve special input registers */
213 for (i = 0; i < ctx->reserved_reg_count; i++) {
214 unsigned chan;
215 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
216 LLVMValueRef reg_index = lp_build_const_int32(
217 base->gallivm,
218 radeon_llvm_reg_index_soa(i, chan));
219 lp_build_intrinsic_unary(base->gallivm->builder,
220 "llvm.AMDGPU.reserve.reg",
221 LLVMVoidTypeInContext(base->gallivm->context),
222 reg_index);
223 }
224 }
225 }
226
/*
 * Shader epilogue: emit the export intrinsics for every output register.
 * Vertex shaders use llvm.AMDGPU.store.output; fragment shaders use the
 * llvm.R600.store.pixel.* family (color, depth, stencil), with a dummy
 * export when no color was written so the hardware still gets an export.
 */
static void llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	unsigned i;

	unsigned color_count = 0;
	boolean has_color = false;

	/* Add the necessary export instructions */
	for (i = 0; i < ctx->output_reg_count; i++) {
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef output;
			/* Outputs live after the reserved input registers. */
			unsigned adjusted_reg_idx = i +
					ctx->reserved_reg_count;

			output = LLVMBuildLoad(base->gallivm->builder,
					ctx->soa.outputs[i][chan], "");

			if (ctx->type == TGSI_PROCESSOR_VERTEX) {
				LLVMValueRef reg_index = lp_build_const_int32(
					base->gallivm,
					radeon_llvm_reg_index_soa(adjusted_reg_idx, chan));
				lp_build_intrinsic_binary(
					base->gallivm->builder,
					"llvm.AMDGPU.store.output",
					LLVMVoidTypeInContext(base->gallivm->context),
					output, reg_index);
			} else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				switch (ctx->r600_outputs[i].name) {
				case TGSI_SEMANTIC_COLOR:
					has_color = true;
					/* color_count advances by 1 per channel,
					 * so /4 gives the color buffer index. */
					if ( color_count/4 < ctx->color_buffer_count) {
						if (ctx->fs_color_all) {
							/* Broadcast this channel to
							 * every bound color buffer. */
							for (unsigned j = 0; j < ctx->color_buffer_count; j++) {
								LLVMValueRef reg_index = lp_build_const_int32(
									base->gallivm,
									(j * 4) + chan);
								lp_build_intrinsic_binary(
									base->gallivm->builder,
									"llvm.R600.store.pixel.color",
									LLVMVoidTypeInContext(base->gallivm->context),
									output, reg_index);
							}
						} else {
							LLVMValueRef reg_index = lp_build_const_int32(
								base->gallivm,
								(color_count++/4) * 4 + chan);
							lp_build_intrinsic_binary(
								base->gallivm->builder,
								"llvm.R600.store.pixel.color",
								LLVMVoidTypeInContext(base->gallivm->context),
								output, reg_index);
						}
					}
					break;
				case TGSI_SEMANTIC_POSITION:
					/* Depth is exported from channel z only;
					 * continue skips the other channels. */
					if (chan != 2)
						continue;
					lp_build_intrinsic_unary(
						base->gallivm->builder,
						"llvm.R600.store.pixel.depth",
						LLVMVoidTypeInContext(base->gallivm->context),
						output);
					break;
				case TGSI_SEMANTIC_STENCIL:
					/* Stencil is exported from channel y only. */
					if (chan != 1)
						continue;
					lp_build_intrinsic_unary(
						base->gallivm->builder,
						"llvm.R600.store.pixel.stencil",
						LLVMVoidTypeInContext(base->gallivm->context),
						output);
					break;
				}
			}
		}
	}

	/* Fragment shaders must export at least one pixel value. */
	if (!has_color && ctx->type == TGSI_PROCESSOR_FRAGMENT)
		lp_build_intrinsic(base->gallivm->builder, "llvm.R600.store.pixel.dummy", LLVMVoidTypeInContext(base->gallivm->context), 0, 0);
}
310
311 static void llvm_emit_tex(
312 const struct lp_build_tgsi_action * action,
313 struct lp_build_tgsi_context * bld_base,
314 struct lp_build_emit_data * emit_data)
315 {
316 struct gallivm_state * gallivm = bld_base->base.gallivm;
317 LLVMValueRef args[6];
318 unsigned c, sampler_src;
319
320 assert(emit_data->arg_count + 2 <= Elements(args));
321
322 for (c = 0; c < emit_data->arg_count; ++c)
323 args[c] = emit_data->args[c];
324
325 sampler_src = emit_data->inst->Instruction.NumSrcRegs-1;
326
327 args[c++] = lp_build_const_int32(gallivm,
328 emit_data->inst->Src[sampler_src].Register.Index);
329 args[c++] = lp_build_const_int32(gallivm,
330 emit_data->inst->Texture.Texture);
331
332 emit_data->output[0] = build_intrinsic(gallivm->builder,
333 action->intr_name,
334 emit_data->dst_type, args, c, LLVMReadNoneAttribute);
335 }
336
337 static void emit_cndlt(
338 const struct lp_build_tgsi_action * action,
339 struct lp_build_tgsi_context * bld_base,
340 struct lp_build_emit_data * emit_data)
341 {
342 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
343 LLVMValueRef float_zero = lp_build_const_float(
344 bld_base->base.gallivm, 0.0f);
345 LLVMValueRef cmp = LLVMBuildFCmp(
346 builder, LLVMRealULT, emit_data->args[0], float_zero, "");
347 emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
348 cmp, emit_data->args[1], emit_data->args[2], "");
349 }
350
351 static void dp_fetch_args(
352 struct lp_build_tgsi_context * bld_base,
353 struct lp_build_emit_data * emit_data)
354 {
355 struct lp_build_context * base = &bld_base->base;
356 unsigned chan;
357 LLVMValueRef elements[2][4];
358 unsigned opcode = emit_data->inst->Instruction.Opcode;
359 unsigned dp_components = (opcode == TGSI_OPCODE_DP2 ? 2 :
360 (opcode == TGSI_OPCODE_DP3 ? 3 : 4));
361 for (chan = 0 ; chan < dp_components; chan++) {
362 elements[0][chan] = lp_build_emit_fetch(bld_base,
363 emit_data->inst, 0, chan);
364 elements[1][chan] = lp_build_emit_fetch(bld_base,
365 emit_data->inst, 1, chan);
366 }
367
368 for ( ; chan < 4; chan++) {
369 elements[0][chan] = base->zero;
370 elements[1][chan] = base->zero;
371 }
372
373 /* Fix up for DPH */
374 if (opcode == TGSI_OPCODE_DPH) {
375 elements[0][TGSI_CHAN_W] = base->one;
376 }
377
378 emit_data->args[0] = lp_build_gather_values(bld_base->base.gallivm,
379 elements[0], 4);
380 emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
381 elements[1], 4);
382 emit_data->arg_count = 2;
383
384 emit_data->dst_type = base->elem_type;
385 }
386
/* Shared action for DP2/DP3/DP4/DPH: dp_fetch_args pads the operands to
 * 4 components, so a single dp4 intrinsic covers all four opcodes. */
static struct lp_build_tgsi_action dot_action = {
	.fetch_args = dp_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.AMDGPU.dp4"
};
392


/*
 * Translate a TGSI token stream into an LLVM module for the r600 backend.
 * Wires the r600-specific fetch/load/emit callbacks into the shared
 * radeon_llvm context, installs the opcode actions, then runs the common
 * gallivm TGSI-to-LLVM translation.  Returns the finalized module.
 */
LLVMModuleRef r600_tgsi_llvm(
	struct radeon_llvm_context * ctx,
	const struct tgsi_token * tokens)
{
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;
	radeon_llvm_context_init(ctx);
	tgsi_scan_shader(tokens, &shader_info);

	bld_base->info = &shader_info;
	bld_base->userdata = ctx;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = llvm_fetch_const;
	bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = llvm_fetch_system_value;
	bld_base->emit_prologue = llvm_emit_prologue;
	bld_base->emit_epilogue = llvm_emit_epilogue;
	ctx->userdata = ctx;
	ctx->load_input = llvm_load_input;
	ctx->load_system_value = llvm_load_system_value;

	/* All dot products share one action; DDX/DDY are lowered through the
	 * texture path on r600 hardware. */
	bld_base->op_actions[TGSI_OPCODE_DP2] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP3] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP4] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DPH] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TEX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXB].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXD].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXL].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXF].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXP].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_CMP].emit = emit_cndlt;

	lp_build_tgsi_llvm(bld_base, tokens);

	radeon_llvm_finalize_module(ctx);

	return ctx->gallivm.module;
}
435
436 const char * r600_llvm_gpu_string(enum radeon_family family)
437 {
438 const char * gpu_family;
439
440 switch (family) {
441 case CHIP_R600:
442 case CHIP_RV610:
443 case CHIP_RV630:
444 case CHIP_RV620:
445 case CHIP_RV635:
446 case CHIP_RS780:
447 case CHIP_RS880:
448 gpu_family = "r600";
449 break;
450 case CHIP_RV710:
451 gpu_family = "rv710";
452 break;
453 case CHIP_RV730:
454 gpu_family = "rv730";
455 break;
456 case CHIP_RV670:
457 case CHIP_RV740:
458 case CHIP_RV770:
459 gpu_family = "rv770";
460 break;
461 case CHIP_PALM:
462 case CHIP_CEDAR:
463 gpu_family = "cedar";
464 break;
465 case CHIP_SUMO:
466 case CHIP_SUMO2:
467 case CHIP_REDWOOD:
468 gpu_family = "redwood";
469 break;
470 case CHIP_JUNIPER:
471 gpu_family = "juniper";
472 break;
473 case CHIP_HEMLOCK:
474 case CHIP_CYPRESS:
475 gpu_family = "cypress";
476 break;
477 case CHIP_BARTS:
478 gpu_family = "barts";
479 break;
480 case CHIP_TURKS:
481 gpu_family = "turks";
482 break;
483 case CHIP_CAICOS:
484 gpu_family = "caicos";
485 break;
486 case CHIP_CAYMAN:
487 case CHIP_ARUBA:
488 gpu_family = "cayman";
489 break;
490 default:
491 gpu_family = "";
492 fprintf(stderr, "Chip not supported by r600 llvm "
493 "backend, please file a bug at bugs.freedesktop.org\n");
494 break;
495 }
496 return gpu_family;
497 }
498
499 unsigned r600_llvm_compile(
500 LLVMModuleRef mod,
501 unsigned char ** inst_bytes,
502 unsigned * inst_byte_count,
503 enum radeon_family family,
504 unsigned dump)
505 {
506 const char * gpu_family = r600_llvm_gpu_string(family);
507 return radeon_llvm_compile(mod, inst_bytes, inst_byte_count,
508 gpu_family, dump);
509 }
510
511 #endif