ilo: move internal shader interface to a new header
[mesa.git] / src / gallium / drivers / r600 / r600_llvm.c
1 #include "r600_llvm.h"
2
3 #include "gallivm/lp_bld_const.h"
4 #include "gallivm/lp_bld_intr.h"
5 #include "gallivm/lp_bld_gather.h"
6 #include "tgsi/tgsi_parse.h"
7 #include "util/u_double_list.h"
8 #include "util/u_memory.h"
9
10 #include "evergreend.h"
11 #include "r600_asm.h"
12 #include "r600_sq.h"
13 #include "r600_opcodes.h"
14 #include "r600_shader.h"
15 #include "r600_pipe.h"
16 #include "radeon_llvm.h"
17 #include "radeon_llvm_emit.h"
18
19 #include <stdio.h>
20
21 #if defined R600_USE_LLVM || defined HAVE_OPENCL
22
23 #define CONSTANT_BUFFER_0_ADDR_SPACE 8
24 #define CONSTANT_BUFFER_1_ADDR_SPACE (CONSTANT_BUFFER_0_ADDR_SPACE + R600_UCP_CONST_BUFFER)
25 #define CONSTANT_TXQ_BUFFER (CONSTANT_BUFFER_0_ADDR_SPACE + R600_TXQ_CONST_BUFFER)
26
27 static LLVMValueRef llvm_load_const_buffer(
28 struct lp_build_tgsi_context * bld_base,
29 LLVMValueRef OffsetValue,
30 unsigned ConstantAddressSpace)
31 {
32 LLVMValueRef offset[2] = {
33 LLVMConstInt(LLVMInt64TypeInContext(bld_base->base.gallivm->context), 0, false),
34 OffsetValue
35 };
36
37 LLVMTypeRef const_ptr_type = LLVMPointerType(LLVMArrayType(LLVMVectorType(bld_base->base.elem_type, 4), 1024),
38 ConstantAddressSpace);
39 LLVMValueRef const_ptr = LLVMBuildIntToPtr(bld_base->base.gallivm->builder, lp_build_const_int32(bld_base->base.gallivm, 0), const_ptr_type, "");
40 LLVMValueRef ptr = LLVMBuildGEP(bld_base->base.gallivm->builder, const_ptr, offset, 2, "");
41 return LLVMBuildLoad(bld_base->base.gallivm->builder, ptr, "");
42 }
43
44 static LLVMValueRef llvm_fetch_const(
45 struct lp_build_tgsi_context * bld_base,
46 const struct tgsi_full_src_register *reg,
47 enum tgsi_opcode_type type,
48 unsigned swizzle)
49 {
50 LLVMValueRef offset = lp_build_const_int32(bld_base->base.gallivm, reg->Register.Index);
51 if (reg->Register.Indirect) {
52 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
53 LLVMValueRef index = LLVMBuildLoad(bld_base->base.gallivm->builder, bld->addr[reg->Indirect.Index][reg->Indirect.Swizzle], "");
54 offset = LLVMBuildAdd(bld_base->base.gallivm->builder, offset, index, "");
55 }
56 unsigned ConstantAddressSpace = CONSTANT_BUFFER_0_ADDR_SPACE ;
57 if (reg->Register.Dimension) {
58 ConstantAddressSpace += reg->Dimension.Index;
59 }
60 LLVMValueRef cvecval = llvm_load_const_buffer(bld_base, offset, ConstantAddressSpace);
61 LLVMValueRef cval = LLVMBuildExtractElement(bld_base->base.gallivm->builder, cvecval, lp_build_const_int32(bld_base->base.gallivm, swizzle), "");
62 return bitcast(bld_base, type, cval);
63 }
64
65 static void llvm_load_system_value(
66 struct radeon_llvm_context * ctx,
67 unsigned index,
68 const struct tgsi_full_declaration *decl)
69 {
70 unsigned chan;
71
72 switch (decl->Semantic.Name) {
73 case TGSI_SEMANTIC_INSTANCEID: chan = 3; break;
74 case TGSI_SEMANTIC_VERTEXID: chan = 0; break;
75 default: assert(!"unknown system value");
76 }
77
78 LLVMValueRef reg = lp_build_const_int32(
79 ctx->soa.bld_base.base.gallivm, chan);
80 ctx->system_values[index] = build_intrinsic(
81 ctx->soa.bld_base.base.gallivm->builder,
82 "llvm.R600.load.input",
83 ctx->soa.bld_base.base.elem_type, &reg, 1,
84 LLVMReadNoneAttribute);
85 }
86
87 static LLVMValueRef
88 llvm_load_input_helper(
89 struct radeon_llvm_context * ctx,
90 unsigned idx, int interp, int ij_index)
91 {
92 const struct lp_build_context * bb = &ctx->soa.bld_base.base;
93 LLVMValueRef arg[2];
94 int arg_count;
95 const char * intrinsic;
96
97 arg[0] = lp_build_const_int32(bb->gallivm, idx);
98
99 if (interp) {
100 intrinsic = "llvm.R600.interp.input";
101 arg[1] = lp_build_const_int32(bb->gallivm, ij_index);
102 arg_count = 2;
103 } else {
104 intrinsic = "llvm.R600.load.input";
105 arg_count = 1;
106 }
107
108 return build_intrinsic(bb->gallivm->builder, intrinsic,
109 bb->elem_type, &arg[0], arg_count, LLVMReadNoneAttribute);
110 }
111
112 static LLVMValueRef
113 llvm_face_select_helper(
114 struct radeon_llvm_context * ctx,
115 unsigned face_loc, LLVMValueRef front_color, LLVMValueRef back_color)
116 {
117 const struct lp_build_context * bb = &ctx->soa.bld_base.base;
118 LLVMValueRef face = llvm_load_input_helper(ctx, face_loc, 0, 0);
119 LLVMValueRef is_front = LLVMBuildFCmp(
120 bb->gallivm->builder, LLVMRealUGT, face,
121 lp_build_const_float(bb->gallivm, 0.0f), "");
122 return LLVMBuildSelect(bb->gallivm->builder, is_front,
123 front_color, back_color, "");
124 }
125
126 static void llvm_load_input(
127 struct radeon_llvm_context * ctx,
128 unsigned input_index,
129 const struct tgsi_full_declaration *decl)
130 {
131 const struct r600_shader_io * input = &ctx->r600_inputs[input_index];
132 unsigned chan;
133 unsigned interp = 0;
134 int ij_index;
135 int two_side = (ctx->two_side && input->name == TGSI_SEMANTIC_COLOR);
136 LLVMValueRef v;
137
138 if (ctx->chip_class >= EVERGREEN && ctx->type == TGSI_PROCESSOR_FRAGMENT &&
139 input->spi_sid) {
140 interp = 1;
141 ij_index = (input->interpolate > 0) ? input->ij_index : -1;
142 }
143
144 for (chan = 0; chan < 4; chan++) {
145 unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
146 int loc;
147
148 if (interp) {
149 loc = 4 * input->lds_pos + chan;
150 } else {
151 if (input->name == TGSI_SEMANTIC_FACE)
152 loc = 4 * ctx->face_gpr;
153 else
154 loc = 4 * input->gpr + chan;
155 }
156
157 v = llvm_load_input_helper(ctx, loc, interp, ij_index);
158
159 if (two_side) {
160 struct r600_shader_io * back_input =
161 &ctx->r600_inputs[input->back_color_input];
162 int back_loc = interp ? back_input->lds_pos : back_input->gpr;
163 LLVMValueRef v2;
164
165 back_loc = 4 * back_loc + chan;
166 v2 = llvm_load_input_helper(ctx, back_loc, interp, ij_index);
167 v = llvm_face_select_helper(ctx, 4 * ctx->face_gpr, v, v2);
168 } else if (input->name == TGSI_SEMANTIC_POSITION &&
169 ctx->type == TGSI_PROCESSOR_FRAGMENT && chan == 3) {
170 /* RCP for fragcoord.w */
171 v = LLVMBuildFDiv(ctx->gallivm.builder,
172 lp_build_const_float(&(ctx->gallivm), 1.0f),
173 v, "");
174 }
175
176 ctx->inputs[soa_index] = v;
177 }
178 }
179
180 static void llvm_emit_prologue(struct lp_build_tgsi_context * bld_base)
181 {
182 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
183 radeon_llvm_shader_type(ctx->main_fn, ctx->type);
184
185 }
186
/* emit_epilogue callback: after the shader body has been translated,
 * emit the stream-output stores (vertex shaders with transform
 * feedback) and the export intrinsics for every shader output, then
 * add dummy exports where the hardware requires at least one.
 *
 * NOTE(review): position exports are numbered from 60 upwards
 * (next_pos = 60) — presumably a convention remapped later by the
 * r600 backend; confirm against the bytecode emission code.
 */
static void llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	struct pipe_stream_output_info * so = ctx->stream_outputs;
	unsigned i;
	unsigned next_pos = 60;
	unsigned next_param = 0;

	unsigned color_count = 0;
	boolean has_color = false;

	/* Stream output (transform feedback): store each declared output
	 * slice with llvm.R600.store.stream.output. */
	if (ctx->type == TGSI_PROCESSOR_VERTEX && so->num_outputs) {
		for (i = 0; i < so->num_outputs; i++) {
			unsigned register_index = so->output[i].register_index;
			unsigned start_component = so->output[i].start_component;
			unsigned num_components = so->output[i].num_components;
			unsigned dst_offset = so->output[i].dst_offset;
			unsigned chan;
			LLVMValueRef elements[4];
			if (dst_offset < start_component) {
				/* NOTE(review): when dst_offset < start_component the
				 * channels are loaded rotated by start_component and
				 * start_component is then cleared, so the write mask
				 * below starts at bit 0 — confirm this matches the
				 * non-LLVM path's handling. */
				for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
					elements[chan] = LLVMBuildLoad(base->gallivm->builder,
						ctx->soa.outputs[register_index][(chan + start_component) % TGSI_NUM_CHANNELS], "");
				}
				start_component = 0;
			} else {
				for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
					elements[chan] = LLVMBuildLoad(base->gallivm->builder,
						ctx->soa.outputs[register_index][chan], "");
				}
			}
			LLVMValueRef output = lp_build_gather_values(base->gallivm, elements, 4);
			LLVMValueRef args[4];
			/* args: value, dword offset, buffer index, write mask. */
			args[0] = output;
			args[1] = lp_build_const_int32(base->gallivm, dst_offset - start_component);
			args[2] = lp_build_const_int32(base->gallivm, so->output[i].output_buffer);
			args[3] = lp_build_const_int32(base->gallivm, ((1 << num_components) - 1) << start_component);
			lp_build_intrinsic(base->gallivm->builder, "llvm.R600.store.stream.output",
				LLVMVoidTypeInContext(base->gallivm->context), args, 4);
		}
	}

	/* Add the necessary export instructions */
	for (i = 0; i < ctx->output_reg_count; i++) {
		unsigned chan;
		LLVMValueRef elements[4];
		/* Gather the four SoA channels of this output into a vector. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			elements[chan] = LLVMBuildLoad(base->gallivm->builder,
				ctx->soa.outputs[i][chan], "");
		}
		/* Alpha-to-one: force alpha of fragment color outputs to 1.0. */
		if (ctx->alpha_to_one && ctx->type == TGSI_PROCESSOR_FRAGMENT && ctx->r600_outputs[i].name == TGSI_SEMANTIC_COLOR)
			elements[3] = lp_build_const_float(base->gallivm, 1.0f);
		LLVMValueRef output = lp_build_gather_values(base->gallivm, elements, 4);

		if (ctx->type == TGSI_PROCESSOR_VERTEX) {
			switch (ctx->r600_outputs[i].name) {
			case TGSI_SEMANTIC_POSITION:
			case TGSI_SEMANTIC_PSIZE: {
				/* Position/point-size: export as POS. */
				LLVMValueRef args[3];
				args[0] = output;
				args[1] = lp_build_const_int32(base->gallivm, next_pos++);
				args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS);
				build_intrinsic(
					base->gallivm->builder,
					"llvm.R600.store.swizzle",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 3, 0);
				break;
			}
			case TGSI_SEMANTIC_CLIPVERTEX: {
				/* Clip vertex: compute the distance to each of the
				 * user clip planes (two float4 registers of plane
				 * coefficients in constant buffer 1) with dp4 and
				 * export the results as two POS exports. */
				LLVMValueRef args[3];
				unsigned reg_index;
				unsigned base_vector_chan;
				LLVMValueRef adjusted_elements[4];
				for (reg_index = 0; reg_index < 2; reg_index ++) {
					for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
						LLVMValueRef offset = lp_build_const_int32(bld_base->base.gallivm, reg_index * 4 + chan);
						LLVMValueRef base_vector = llvm_load_const_buffer(bld_base, offset, CONSTANT_BUFFER_1_ADDR_SPACE);
						args[0] = output;
						args[1] = base_vector;
						adjusted_elements[chan] = build_intrinsic(base->gallivm->builder,
							"llvm.AMDGPU.dp4", bld_base->base.elem_type,
							args, 2, LLVMReadNoneAttribute);
					}
					args[0] = lp_build_gather_values(base->gallivm,
						adjusted_elements, 4);
					args[1] = lp_build_const_int32(base->gallivm, next_pos++);
					args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS);
					build_intrinsic(
						base->gallivm->builder,
						"llvm.R600.store.swizzle",
						LLVMVoidTypeInContext(base->gallivm->context),
						args, 3, 0);
				}
				break;
			}
			case TGSI_SEMANTIC_CLIPDIST : {
				/* Clip distances are exported twice: once as POS for
				 * clipping and once as PARAM so the fragment shader
				 * can read them. */
				LLVMValueRef args[3];
				args[0] = output;
				args[1] = lp_build_const_int32(base->gallivm, next_pos++);
				args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS);
				build_intrinsic(
					base->gallivm->builder,
					"llvm.R600.store.swizzle",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 3, 0);
				args[1] = lp_build_const_int32(base->gallivm, next_param++);
				args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM);
				build_intrinsic(
					base->gallivm->builder,
					"llvm.R600.store.swizzle",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 3, 0);
				break;
			}
			case TGSI_SEMANTIC_FOG: {
				/* Fog: only the x channel is meaningful; pad to
				 * (x, 0, 0, 1) before exporting as PARAM. */
				elements[0] = LLVMBuildLoad(base->gallivm->builder,
					ctx->soa.outputs[i][0], "");
				elements[1] = elements[2] = lp_build_const_float(base->gallivm, 0.0f);
				elements[3] = lp_build_const_float(base->gallivm, 1.0f);

				LLVMValueRef args[3];
				args[0] = lp_build_gather_values(base->gallivm, elements, 4);
				args[1] = lp_build_const_int32(base->gallivm, next_param++);
				args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM);
				build_intrinsic(
					base->gallivm->builder,
					"llvm.R600.store.swizzle",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 3, 0);
				break;
			}
			default: {
				/* Every other vertex output becomes a PARAM export. */
				LLVMValueRef args[3];
				args[0] = output;
				args[1] = lp_build_const_int32(base->gallivm, next_param++);
				args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM);
				build_intrinsic(
					base->gallivm->builder,
					"llvm.R600.store.swizzle",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 3, 0);
				break;
			}
			}
		} else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			switch (ctx->r600_outputs[i].name) {
			case TGSI_SEMANTIC_COLOR:
				has_color = true;
				if ( color_count < ctx->color_buffer_count) {
					LLVMValueRef args[3];
					args[0] = output;
					if (ctx->fs_color_all) {
						/* Broadcast this color to every bound
						 * color buffer (write-all-cbufs). */
						for (unsigned j = 0; j < ctx->color_buffer_count; j++) {
							args[1] = lp_build_const_int32(base->gallivm, j);
							args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL);
							build_intrinsic(
								base->gallivm->builder,
								"llvm.R600.store.swizzle",
								LLVMVoidTypeInContext(base->gallivm->context),
								args, 3, 0);
						}
					} else {
						args[1] = lp_build_const_int32(base->gallivm, color_count++);
						args[2] = lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL);
						build_intrinsic(
							base->gallivm->builder,
							"llvm.R600.store.swizzle",
							LLVMVoidTypeInContext(base->gallivm->context),
							args, 3, 0);
					}
				}
				break;
			case TGSI_SEMANTIC_POSITION:
				/* Fragment depth lives in channel z of the output. */
				lp_build_intrinsic_unary(
					base->gallivm->builder,
					"llvm.R600.store.pixel.depth",
					LLVMVoidTypeInContext(base->gallivm->context),
					LLVMBuildLoad(base->gallivm->builder, ctx->soa.outputs[i][2], ""));
				break;
			case TGSI_SEMANTIC_STENCIL:
				/* Stencil ref lives in channel y of the output. */
				lp_build_intrinsic_unary(
					base->gallivm->builder,
					"llvm.R600.store.pixel.stencil",
					LLVMVoidTypeInContext(base->gallivm->context),
					LLVMBuildLoad(base->gallivm->builder, ctx->soa.outputs[i][1], ""));
				break;
			}
		}
	}
	// Add dummy exports
	if (ctx->type == TGSI_PROCESSOR_VERTEX) {
		/* A vertex shader must emit at least one PARAM and one POS
		 * export; emit dummies if the loop above produced none. */
		if (!next_param) {
			lp_build_intrinsic_unary(base->gallivm->builder, "llvm.R600.store.dummy",
				LLVMVoidTypeInContext(base->gallivm->context),
				lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM));
		}
		if (!(next_pos-60)) {
			lp_build_intrinsic_unary(base->gallivm->builder, "llvm.R600.store.dummy",
				LLVMVoidTypeInContext(base->gallivm->context),
				lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS));
		}
	}
	if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		/* Likewise a fragment shader needs at least one PIXEL export. */
		if (!has_color) {
			lp_build_intrinsic_unary(base->gallivm->builder, "llvm.R600.store.dummy",
				LLVMVoidTypeInContext(base->gallivm->context),
				lp_build_const_int32(base->gallivm, V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL));
		}
	}

}
400
/* Shared emit callback for all texture opcodes (TEX/TXB/TXL/TXD/TXF/
 * TXQ/TXP/DDX/DDY/...): handles buffer textures specially, pads TEX
 * coordinates, appends resource/sampler/target ids and emits the
 * r600 texture intrinsic named by the action.
 */
static void llvm_emit_tex(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMValueRef args[6];
	unsigned c, sampler_src;

	/* Buffer textures bypass the sampler: TXQ reads the size from the
	 * driver-maintained buffer-info constant buffer, TXF becomes a raw
	 * llvm.R600.load.texbuf fetch. */
	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
		switch (emit_data->inst->Instruction.Opcode) {
		case TGSI_OPCODE_TXQ: {
			LLVMValueRef offset = lp_build_const_int32(bld_base->base.gallivm, 1);
			LLVMValueRef cvecval = llvm_load_const_buffer(bld_base, offset, R600_BUFFER_INFO_CONST_BUFFER);
			emit_data->output[0] = cvecval;
			return;
		}
		case TGSI_OPCODE_TXF: {
			args[0] = LLVMBuildExtractElement(gallivm->builder, emit_data->args[0], lp_build_const_int32(gallivm, 0), "");
			args[1] = lp_build_const_int32(gallivm, R600_MAX_CONST_BUFFERS);
			emit_data->output[0] = build_intrinsic(gallivm->builder,
					"llvm.R600.load.texbuf",
					emit_data->dst_type, args, 2, LLVMReadNoneAttribute);
		}
			return;
		default:
			break;
		}
	}

	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_TEX) {
		/* Plain TEX: replace the unused coordinate channels with
		 * undef so the backend does not allocate them. */
		LLVMValueRef Vector[4] = {
			LLVMBuildExtractElement(gallivm->builder, emit_data->args[0],
				lp_build_const_int32(gallivm, 0), ""),
			LLVMBuildExtractElement(gallivm->builder, emit_data->args[0],
				lp_build_const_int32(gallivm, 1), ""),
			LLVMBuildExtractElement(gallivm->builder, emit_data->args[0],
				lp_build_const_int32(gallivm, 2), ""),
			LLVMBuildExtractElement(gallivm->builder, emit_data->args[0],
				lp_build_const_int32(gallivm, 3), ""),
		};
		switch (emit_data->inst->Texture.Texture) {
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_RECT:
			Vector[2] = Vector[3] = LLVMGetUndef(bld_base->base.elem_type);
			break;
		case TGSI_TEXTURE_1D:
			Vector[1] = Vector[2] = Vector[3] = LLVMGetUndef(bld_base->base.elem_type);
			break;
		default:
			break;
		}
		args[0] = lp_build_gather_values(gallivm, Vector, 4);
	} else {
		args[0] = emit_data->args[0];
	}

	/* Three extra ids are appended below; make sure args[] has room. */
	assert(emit_data->arg_count + 2 <= Elements(args));

	for (c = 1; c < emit_data->arg_count; ++c)
		args[c] = emit_data->args[c];

	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
		/* NOTE(review): TXF multiplies the x/y/z texel coordinates by
		 * two (shl 1) — presumably an encoding expected by the r600
		 * fetch instruction; confirm against the backend lowering. */
		args[1] = LLVMBuildShl(gallivm->builder, args[1], lp_build_const_int32(gallivm, 1), "");
		args[2] = LLVMBuildShl(gallivm->builder, args[2], lp_build_const_int32(gallivm, 1), "");
		args[3] = LLVMBuildShl(gallivm->builder, args[3], lp_build_const_int32(gallivm, 1), "");
	}

	/* The sampler register is always the last source operand. */
	sampler_src = emit_data->inst->Instruction.NumSrcRegs-1;

	/* Append resource id (offset past the const buffers), sampler id
	 * and texture target. */
	args[c++] = lp_build_const_int32(gallivm,
					emit_data->inst->Src[sampler_src].Register.Index + R600_MAX_CONST_BUFFERS);
	args[c++] = lp_build_const_int32(gallivm,
					emit_data->inst->Src[sampler_src].Register.Index);
	args[c++] = lp_build_const_int32(gallivm,
					emit_data->inst->Texture.Texture);

	emit_data->output[0] = build_intrinsic(gallivm->builder,
					action->intr_name,
					emit_data->dst_type, args, c, LLVMReadNoneAttribute);

	/* TXQ on cube arrays: the hardware does not return the number of
	 * layers (z component); patch it in from the driver-maintained TXQ
	 * constant buffer and remember that the state tracker must fill it. */
	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
		((emit_data->inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
		emit_data->inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
		if (emit_data->inst->Dst[0].Register.WriteMask & 4) {
			LLVMValueRef offset = lp_build_const_int32(bld_base->base.gallivm, 0);
			LLVMValueRef ZLayer = LLVMBuildExtractElement(gallivm->builder,
				llvm_load_const_buffer(bld_base, offset, CONSTANT_TXQ_BUFFER),
				lp_build_const_int32(gallivm, 0), "");

			emit_data->output[0] = LLVMBuildInsertElement(gallivm->builder, emit_data->output[0], ZLayer, lp_build_const_int32(gallivm, 2), "");
			struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
			ctx->has_txq_cube_array_z_comp = true;
		}
}
496
497 static void emit_cndlt(
498 const struct lp_build_tgsi_action * action,
499 struct lp_build_tgsi_context * bld_base,
500 struct lp_build_emit_data * emit_data)
501 {
502 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
503 LLVMValueRef float_zero = lp_build_const_float(
504 bld_base->base.gallivm, 0.0f);
505 LLVMValueRef cmp = LLVMBuildFCmp(
506 builder, LLVMRealULT, emit_data->args[0], float_zero, "");
507 emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
508 cmp, emit_data->args[1], emit_data->args[2], "");
509 }
510
511 static void dp_fetch_args(
512 struct lp_build_tgsi_context * bld_base,
513 struct lp_build_emit_data * emit_data)
514 {
515 struct lp_build_context * base = &bld_base->base;
516 unsigned chan;
517 LLVMValueRef elements[2][4];
518 unsigned opcode = emit_data->inst->Instruction.Opcode;
519 unsigned dp_components = (opcode == TGSI_OPCODE_DP2 ? 2 :
520 (opcode == TGSI_OPCODE_DP3 ? 3 : 4));
521 for (chan = 0 ; chan < dp_components; chan++) {
522 elements[0][chan] = lp_build_emit_fetch(bld_base,
523 emit_data->inst, 0, chan);
524 elements[1][chan] = lp_build_emit_fetch(bld_base,
525 emit_data->inst, 1, chan);
526 }
527
528 for ( ; chan < 4; chan++) {
529 elements[0][chan] = base->zero;
530 elements[1][chan] = base->zero;
531 }
532
533 /* Fix up for DPH */
534 if (opcode == TGSI_OPCODE_DPH) {
535 elements[0][TGSI_CHAN_W] = base->one;
536 }
537
538 emit_data->args[0] = lp_build_gather_values(bld_base->base.gallivm,
539 elements[0], 4);
540 emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
541 elements[1], 4);
542 emit_data->arg_count = 2;
543
544 emit_data->dst_type = base->elem_type;
545 }
546
/* Shared action for the dot-product opcodes (DP2/DP3/DP4/DPH): operands
 * are padded by dp_fetch_args so llvm.AMDGPU.dp4 implements them all. */
static struct lp_build_tgsi_action dot_action = {
	.fetch_args = dp_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.AMDGPU.dp4"
};
552
553
554
/* Translate a TGSI token stream into an LLVM module for the r600
 * backend: initialize the shared radeon LLVM context, install the
 * r600-specific fetch/emit callbacks and opcode actions, run the common
 * gallivm TGSI->LLVM translation and return the finished module.
 */
LLVMModuleRef r600_tgsi_llvm(
	struct radeon_llvm_context * ctx,
	const struct tgsi_token * tokens)
{
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;
	radeon_llvm_context_init(ctx);
	radeon_llvm_create_func(ctx, NULL, 0);
	tgsi_scan_shader(tokens, &shader_info);

	/* NOTE(review): shader_info is a local, so bld_base->info dangles
	 * once this function returns.  It is only read while
	 * lp_build_tgsi_llvm() runs below — confirm nothing touches
	 * ctx->soa.bld_base.info afterwards. */
	bld_base->info = &shader_info;
	bld_base->userdata = ctx;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = llvm_fetch_const;
	bld_base->emit_prologue = llvm_emit_prologue;
	bld_base->emit_epilogue = llvm_emit_epilogue;
	ctx->userdata = ctx;
	ctx->load_input = llvm_load_input;
	ctx->load_system_value = llvm_load_system_value;

	/* All dot products share one action; texture opcodes share the
	 * llvm_emit_tex lowering; CMP is lowered to a select. */
	bld_base->op_actions[TGSI_OPCODE_DP2] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP3] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP4] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DPH] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TEX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TEX2].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXB].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXB2].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXD].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXL].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXL2].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXF].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXP].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_CMP].emit = emit_cndlt;

	lp_build_tgsi_llvm(bld_base, tokens);

	radeon_llvm_finalize_module(ctx);

	return ctx->gallivm.module;
}
598
599 /* We need to define these R600 registers here, because we can't include
600 * evergreend.h and r600d.h.
601 */
602 #define R_028868_SQ_PGM_RESOURCES_VS 0x028868
603 #define R_028850_SQ_PGM_RESOURCES_PS 0x028850
604
605 unsigned r600_llvm_compile(
606 LLVMModuleRef mod,
607 enum radeon_family family,
608 struct r600_bytecode *bc,
609 boolean *use_kill,
610 unsigned dump)
611 {
612 unsigned r;
613 struct radeon_llvm_binary binary;
614 const char * gpu_family = r600_llvm_gpu_string(family);
615 unsigned i;
616
617 r = radeon_llvm_compile(mod, &binary, gpu_family, dump);
618
619 assert(binary.code_size % 4 == 0);
620 bc->bytecode = CALLOC(1, binary.code_size);
621 memcpy(bc->bytecode, binary.code, binary.code_size);
622 bc->ndw = binary.code_size / 4;
623
624 for (i = 0; i < binary.config_size; i+= 8) {
625 unsigned reg =
626 util_le32_to_cpu(*(uint32_t*)(binary.config + i));
627 unsigned value =
628 util_le32_to_cpu(*(uint32_t*)(binary.config + i + 4));
629 switch (reg) {
630 /* R600 / R700 */
631 case R_028850_SQ_PGM_RESOURCES_PS:
632 case R_028868_SQ_PGM_RESOURCES_VS:
633 /* Evergreen / Northern Islands */
634 case R_028844_SQ_PGM_RESOURCES_PS:
635 case R_028860_SQ_PGM_RESOURCES_VS:
636 case R_0288D4_SQ_PGM_RESOURCES_LS:
637 bc->ngpr = G_028844_NUM_GPRS(value);
638 bc->nstack = G_028844_STACK_SIZE(value);
639 break;
640 case R_02880C_DB_SHADER_CONTROL:
641 *use_kill = G_02880C_KILL_ENABLE(value);
642 break;
643 }
644 }
645
646 return r;
647 }
648
649 #endif