r600: enable TEXCOORD semantic for TGSI.
src/gallium/drivers/r600/r600_shader.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_sq.h"
#include "r600_formats.h"
#include "r600_opcodes.h"
#include "r600_shader.h"
#include "r600_dump.h"
#include "r600d.h"
#include "sfn/sfn_nir.h"

#include "sb/sb_public.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_from_mesa.h"
#include "nir/tgsi_to_nir.h"
#include "nir/nir_to_tgsi_info.h"
#include "compiler/nir/nir.h"
#include "util/u_bitcast.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include <stdio.h>
#include <errno.h>

/* CAYMAN notes
Why CAYMAN got loops for lots of instructions is explained here.

-These 8xx t-slot only ops are implemented in all vector slots.
MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
These 8xx t-slot only opcodes become vector ops, with all four
slots expecting the arguments on sources a and b. Result is
broadcast to all channels.
MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
These 8xx t-slot only opcodes become vector ops in the z, y, and
x slots.
EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
SQRT_IEEE/_64
SIN/COS
The w slot may have an independent co-issued operation, or if the
result is required to be in the w slot, the opcode above may be
issued in the w slot as well.
The compiler must issue the source argument to slots z, y, and x
*/
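
/* Illustrative sketch (not driver code): on Cayman a t-slot-only op such as
 * MULLO_INT is emitted once per vector slot, with only the slot matching the
 * wanted destination channel committing its result.  The CAYMAN branch of
 * single_alu_op2() further down does exactly this:
 *
 *   for (i = 0; i < 4; i++) {               // slots x, y, z, w
 *       alu.op = ALU_OP2_MULLO_INT;         // same op, same sources per slot
 *       alu.dst.chan = i;
 *       alu.dst.write = (i == dst_chan);    // only one slot writes
 *       alu.last = (i == 3);
 *   }
 */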

/* Contents of r0 on entry to various shaders

 VS - .x = VertexID
      .y = RelVertexID (??)
      .w = InstanceID

 GS - r0.xyw, r1.xyz = per-vertex offsets
      r0.z = PrimitiveID

 TCS - .x = PatchID
       .y = RelPatchID (??)
       .z = InvocationID
       .w = tess factor base.

 TES - .x = TessCoord.x
     - .y = TessCoord.y
     - .z = RelPatchID (??)
     - .w = PrimitiveID

 PS - face_gpr.z = SampleMask
      face_gpr.w = SampleID
*/
#define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
static int r600_shader_from_tgsi(struct r600_context *rctx,
                                 struct r600_pipe_shader *pipeshader,
                                 union r600_shader_key key);

static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
                               int size, unsigned comp_mask)
{
    if (!size)
        return;

    if (ps->num_arrays == ps->max_arrays) {
        ps->max_arrays += 64;
        ps->arrays = realloc(ps->arrays, ps->max_arrays *
                             sizeof(struct r600_shader_array));
    }

    int n = ps->num_arrays;
    ++ps->num_arrays;

    ps->arrays[n].comp_mask = comp_mask;
    ps->arrays[n].gpr_start = start_gpr;
    ps->arrays[n].gpr_count = size;
}

static void r600_dump_streamout(struct pipe_stream_output_info *so)
{
    unsigned i;

    fprintf(stderr, "STREAMOUT\n");
    for (i = 0; i < so->num_outputs; i++) {
        unsigned mask = ((1 << so->output[i].num_components) - 1) <<
                        so->output[i].start_component;
        fprintf(stderr, "  %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
                i,
                so->output[i].stream,
                so->output[i].output_buffer,
                so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
                so->output[i].register_index,
                mask & 1 ? "x" : "",
                mask & 2 ? "y" : "",
                mask & 4 ? "z" : "",
                mask & 8 ? "w" : "",
                so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
    }
}

static int store_shader(struct pipe_context *ctx,
                        struct r600_pipe_shader *shader)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    uint32_t *ptr, i;

    if (shader->bo == NULL) {
        shader->bo = (struct r600_resource*)
            pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
        if (shader->bo == NULL) {
            return -ENOMEM;
        }
        ptr = r600_buffer_map_sync_with_rings(
            &rctx->b, shader->bo,
            PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
        if (R600_BIG_ENDIAN) {
            for (i = 0; i < shader->shader.bc.ndw; ++i) {
                ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
            }
        } else {
            memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
        }
        rctx->b.ws->buffer_unmap(shader->bo->buf);
    }

    return 0;
}

extern const struct nir_shader_compiler_options r600_nir_options;
static int nshader = 0;
int r600_pipe_shader_create(struct pipe_context *ctx,
                            struct r600_pipe_shader *shader,
                            union r600_shader_key key)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct r600_pipe_shader_selector *sel = shader->selector;
    int r;
    struct r600_screen *rscreen = (struct r600_screen *)ctx->screen;

    int processor = sel->ir_type == PIPE_SHADER_IR_TGSI ?
        tgsi_get_processor_type(sel->tokens) :
        pipe_shader_type_from_mesa(sel->nir->info.stage);

    bool dump = r600_can_dump_shader(&rctx->screen->b, processor);
    unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB) &&
        !(rscreen->b.debug_flags & DBG_NIR);
    unsigned sb_disasm;
    unsigned export_shader;

    shader->shader.bc.isa = rctx->isa;

    if (!(rscreen->b.debug_flags & DBG_NIR)) {
        assert(sel->ir_type == PIPE_SHADER_IR_TGSI);
        r = r600_shader_from_tgsi(rctx, shader, key);
        if (r) {
            R600_ERR("translation from TGSI failed !\n");
            goto error;
        }
    } else {
        if (sel->ir_type == PIPE_SHADER_IR_TGSI)
            sel->nir = tgsi_to_nir_noscreen(sel->tokens, &r600_nir_options);
        nir_tgsi_scan_shader(sel->nir, &sel->info, true);
        r = r600_shader_from_nir(rctx, shader, &key);
        if (r) {
            fprintf(stderr, "--Failed shader--------------------------------------------------\n");

            if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
                fprintf(stderr, "--TGSI--------------------------------------------------------\n");
                tgsi_dump(sel->tokens, 0);
            }

            if (rscreen->b.debug_flags & DBG_NIR) {
                fprintf(stderr, "--NIR --------------------------------------------------------\n");
                nir_print_shader(sel->nir, stderr);
            }

            R600_ERR("translation from NIR failed !\n");
            goto error;
        }
    }

    if (dump) {
        if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
            fprintf(stderr, "--TGSI--------------------------------------------------------\n");
            tgsi_dump(sel->tokens, 0);
        }

        if (sel->so.num_outputs) {
            r600_dump_streamout(&sel->so);
        }
    }

    if (shader->shader.processor_type == PIPE_SHADER_VERTEX) {
        /* only disable for vertex shaders in tess paths */
        if (key.vs.as_ls)
            use_sb = 0;
    }
    use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
    use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);
    use_sb &= (shader->shader.processor_type != PIPE_SHADER_COMPUTE);

    /* disable SB for shaders using doubles */
    use_sb &= !shader->shader.uses_doubles;

    use_sb &= !shader->shader.uses_atomics;
    use_sb &= !shader->shader.uses_images;
    use_sb &= !shader->shader.uses_helper_invocation;

    /* Check if the bytecode has already been built. */
    if (!shader->shader.bc.bytecode) {
        r = r600_bytecode_build(&shader->shader.bc);
        if (r) {
            R600_ERR("building bytecode failed !\n");
            goto error;
        }
    }

    sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
    if (dump && !sb_disasm) {
        fprintf(stderr, "--------------------------------------------------------------\n");
        r600_bytecode_disasm(&shader->shader.bc);
        fprintf(stderr, "______________________________________________________________\n");
    } else if ((dump && sb_disasm) || use_sb) {
        r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
                                     dump, use_sb);
        if (r) {
            R600_ERR("r600_sb_bytecode_process failed !\n");
            goto error;
        }
    }

    if (dump) {
        FILE *f;
        char fname[1024];
        snprintf(fname, 1024, "shader_from_%s_%d.cpp",
                 (sel->ir_type == PIPE_SHADER_IR_TGSI ?
                  (rscreen->b.debug_flags & DBG_NIR ? "tgsi-nir" : "tgsi")
                  : "nir"), nshader);
        print_shader_info(stderr, nshader, &shader->shader);
        print_pipe_info(stderr, &sel->info);
        f = fopen(fname, "w");
        if (f) {
            print_shader_info(f, nshader, &shader->shader);
            if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
                fprintf(f, "/****TGSI**********************************\n");
                tgsi_dump_to_file(sel->tokens, 0, f);
            }

            if (rscreen->b.debug_flags & DBG_NIR) {
                fprintf(f, "/****NIR **********************************\n");
                nir_print_shader(sel->nir, f);
            }
            fprintf(f, "******************************************/\n");
            fclose(f);
        }
        nshader++;
    }

    if (shader->gs_copy_shader) {
        if (dump) {
            // dump copy shader
            r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
                                         &shader->gs_copy_shader->shader, dump, 0);
            if (r)
                goto error;
        }

        if ((r = store_shader(ctx, shader->gs_copy_shader)))
            goto error;
    }

    /* Store the shader in a buffer. */
    if ((r = store_shader(ctx, shader)))
        goto error;

    /* Build state. */
    switch (shader->shader.processor_type) {
    case PIPE_SHADER_TESS_CTRL:
        evergreen_update_hs_state(ctx, shader);
        break;
    case PIPE_SHADER_TESS_EVAL:
        if (key.tes.as_es)
            evergreen_update_es_state(ctx, shader);
        else
            evergreen_update_vs_state(ctx, shader);
        break;
    case PIPE_SHADER_GEOMETRY:
        if (rctx->b.chip_class >= EVERGREEN) {
            evergreen_update_gs_state(ctx, shader);
            evergreen_update_vs_state(ctx, shader->gs_copy_shader);
        } else {
            r600_update_gs_state(ctx, shader);
            r600_update_vs_state(ctx, shader->gs_copy_shader);
        }
        break;
    case PIPE_SHADER_VERTEX:
        export_shader = key.vs.as_es;
        if (rctx->b.chip_class >= EVERGREEN) {
            if (key.vs.as_ls)
                evergreen_update_ls_state(ctx, shader);
            else if (key.vs.as_es)
                evergreen_update_es_state(ctx, shader);
            else
                evergreen_update_vs_state(ctx, shader);
        } else {
            if (export_shader)
                r600_update_es_state(ctx, shader);
            else
                r600_update_vs_state(ctx, shader);
        }
        break;
    case PIPE_SHADER_FRAGMENT:
        if (rctx->b.chip_class >= EVERGREEN) {
            evergreen_update_ps_state(ctx, shader);
        } else {
            r600_update_ps_state(ctx, shader);
        }
        break;
    case PIPE_SHADER_COMPUTE:
        evergreen_update_ls_state(ctx, shader);
        break;
    default:
        r = -EINVAL;
        goto error;
    }
    return 0;

error:
    r600_pipe_shader_destroy(ctx, shader);
    return r;
}

void r600_pipe_shader_destroy(struct pipe_context *ctx UNUSED, struct r600_pipe_shader *shader)
{
    r600_resource_reference(&shader->bo, NULL);
    if (shader->shader.bc.cf.next)
        r600_bytecode_clear(&shader->shader.bc);
    r600_release_command_buffer(&shader->command_buffer);
}

/*
 * tgsi -> r600 shader
 */
struct r600_shader_tgsi_instruction;

struct r600_shader_src {
    unsigned sel;
    unsigned swizzle[4];
    unsigned neg;
    unsigned abs;
    unsigned rel;
    unsigned kc_bank;
    boolean kc_rel; /* true if cache bank is indexed */
    uint32_t value[4];
};

struct eg_interp {
    boolean enabled;
    unsigned ij_index;
};

struct r600_shader_ctx {
    struct tgsi_shader_info info;
    struct tgsi_array_info *array_infos;
    /* flag for each tgsi temp array if its been spilled or not */
    bool *spilled_arrays;
    struct tgsi_parse_context parse;
    const struct tgsi_token *tokens;
    unsigned type;
    unsigned file_offset[TGSI_FILE_COUNT];
    unsigned temp_reg;
    const struct r600_shader_tgsi_instruction *inst_info;
    struct r600_bytecode *bc;
    struct r600_shader *shader;
    struct r600_shader_src src[4];
    uint32_t *literals;
    uint32_t nliterals;
    uint32_t max_driver_temp_used;
    /* needed for evergreen interpolation */
    struct eg_interp eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
    /* evergreen/cayman also store sample mask in face register */
    int face_gpr;
    /* sample id is .w component stored in fixed point position register */
    int fixed_pt_position_gpr;
    int colors_used;
    boolean clip_vertex_write;
    unsigned cv_output;
    unsigned edgeflag_output;
    int helper_invoc_reg;
    int cs_block_size_reg;
    int cs_grid_size_reg;
    bool cs_block_size_loaded, cs_grid_size_loaded;
    int fragcoord_input;
    int next_ring_offset;
    int gs_out_ring_offset;
    int gs_next_vertex;
    struct r600_shader *gs_for_vs;
    int gs_export_gpr_tregs[4];
    int gs_rotated_input[2];
    const struct pipe_stream_output_info *gs_stream_output_info;
    unsigned enabled_stream_buffers_mask;
    unsigned tess_input_info; /* temp with tess input offsets */
    unsigned tess_output_info; /* temp with tess output offsets */
    unsigned thread_id_gpr; /* temp with thread id calculated for images */
};

struct r600_shader_tgsi_instruction {
    unsigned op;
    int (*process)(struct r600_shader_ctx *ctx);
};

static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
static int tgsi_else(struct r600_shader_ctx *ctx);
static int tgsi_endif(struct r600_shader_ctx *ctx);
static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
static int tgsi_endloop(struct r600_shader_ctx *ctx);
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
                                unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
                                unsigned int dst_reg);
static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
                              const struct r600_shader_src *shader_src,
                              unsigned chan);
static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
                               unsigned dst_reg, unsigned mask);

static bool ctx_needs_stack_workaround_8xx(struct r600_shader_ctx *ctx)
{
    if (ctx->bc->family == CHIP_HEMLOCK ||
        ctx->bc->family == CHIP_CYPRESS ||
        ctx->bc->family == CHIP_JUNIPER)
        return false;
    return true;
}

static int tgsi_last_instruction(unsigned writemask)
{
    int i, lasti = 0;

    for (i = 0; i < 4; i++) {
        if (writemask & (1 << i)) {
            lasti = i;
        }
    }
    return lasti;
}
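
/* Worked example (illustrative, not driver code):
 *   tgsi_last_instruction(0x5) -> 2   (writemask xz: last written chan is z)
 *   tgsi_last_instruction(0xf) -> 3   (full xyzw mask)
 * The result decides which ALU slot gets alu.last = 1 when the group is
 * emitted. */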

static int tgsi_is_supported(struct r600_shader_ctx *ctx)
{
    struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
    unsigned j;

    if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
        R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
        return -EINVAL;
    }
#if 0
    if (i->Instruction.Label) {
        R600_ERR("label unsupported\n");
        return -EINVAL;
    }
#endif
    for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
        if (i->Src[j].Register.Dimension) {
            switch (i->Src[j].Register.File) {
            case TGSI_FILE_CONSTANT:
            case TGSI_FILE_HW_ATOMIC:
                break;
            case TGSI_FILE_INPUT:
                if (ctx->type == PIPE_SHADER_GEOMETRY ||
                    ctx->type == PIPE_SHADER_TESS_CTRL ||
                    ctx->type == PIPE_SHADER_TESS_EVAL)
                    break;
                /* fallthrough */
            case TGSI_FILE_OUTPUT:
                if (ctx->type == PIPE_SHADER_TESS_CTRL)
                    break;
                /* fallthrough */
            default:
                R600_ERR("unsupported src %d (file %d, dimension %d)\n", j,
                         i->Src[j].Register.File,
                         i->Src[j].Register.Dimension);
                return -EINVAL;
            }
        }
    }
    for (j = 0; j < i->Instruction.NumDstRegs; j++) {
        if (i->Dst[j].Register.Dimension) {
            if (ctx->type == PIPE_SHADER_TESS_CTRL)
                continue;
            R600_ERR("unsupported dst (dimension)\n");
            return -EINVAL;
        }
    }
    return 0;
}

int eg_get_interpolator_index(unsigned interpolate, unsigned location)
{
    if (interpolate == TGSI_INTERPOLATE_COLOR ||
        interpolate == TGSI_INTERPOLATE_LINEAR ||
        interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
    {
        int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
        int loc;

        switch(location) {
        case TGSI_INTERPOLATE_LOC_CENTER:
            loc = 1;
            break;
        case TGSI_INTERPOLATE_LOC_CENTROID:
            loc = 2;
            break;
        case TGSI_INTERPOLATE_LOC_SAMPLE:
        default:
            loc = 0; break;
        }

        return is_linear * 3 + loc;
    }

    return -1;
}
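
/* Worked examples (illustrative): the six eg_interpolators[] slots decode as
 *   perspective/color + sample   -> 0*3 + 0 = 0
 *   perspective/color + center   -> 0*3 + 1 = 1
 *   perspective/color + centroid -> 0*3 + 2 = 2
 *   linear + sample/center/centroid -> 3, 4, 5
 * Anything else (e.g. TGSI_INTERPOLATE_CONSTANT) returns -1 and gets no
 * barycentric ij pair. */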

static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
                                             int input)
{
    int i = eg_get_interpolator_index(
        ctx->shader->input[input].interpolate,
        ctx->shader->input[input].interpolate_location);
    assert(i >= 0);
    ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
}

static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
{
    int i, r;
    struct r600_bytecode_alu alu;
    int gpr = 0, base_chan = 0;
    int ij_index = ctx->shader->input[input].ij_index;

    /* work out gpr and base_chan from index */
    gpr = ij_index / 2;
    base_chan = (2 * (ij_index % 2)) + 1;

    for (i = 0; i < 8; i++) {
        memset(&alu, 0, sizeof(struct r600_bytecode_alu));

        if (i < 4)
            alu.op = ALU_OP2_INTERP_ZW;
        else
            alu.op = ALU_OP2_INTERP_XY;

        if ((i > 1) && (i < 6)) {
            alu.dst.sel = ctx->shader->input[input].gpr;
            alu.dst.write = 1;
        }

        alu.dst.chan = i % 4;

        alu.src[0].sel = gpr;
        alu.src[0].chan = (base_chan - (i % 2));

        alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;

        alu.bank_swizzle_force = SQ_ALU_VEC_210;
        if ((i % 4) == 3)
            alu.last = 1;
        r = r600_bytecode_add_alu(ctx->bc, &alu);
        if (r)
            return r;
    }
    return 0;
}
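
/* Illustrative mapping (not driver code): two barycentric ij pairs are
 * packed per GPR, so
 *   ij_index 0 -> gpr 0, base_chan 1 (pair in .xy)
 *   ij_index 1 -> gpr 0, base_chan 3 (pair in .zw)
 *   ij_index 2 -> gpr 1, base_chan 1
 * Each of the eight INTERP slots reads src chan (base_chan - (i % 2)),
 * alternating between the two halves of the pair; slots 2..5 are the ones
 * that actually write the input's destination GPR. */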

static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
{
    int i, r;
    struct r600_bytecode_alu alu;

    for (i = 0; i < 4; i++) {
        memset(&alu, 0, sizeof(struct r600_bytecode_alu));

        alu.op = ALU_OP1_INTERP_LOAD_P0;

        alu.dst.sel = ctx->shader->input[input].gpr;
        alu.dst.write = 1;

        alu.dst.chan = i;

        alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
        alu.src[0].chan = i;

        if (i == 3)
            alu.last = 1;
        r = r600_bytecode_add_alu(ctx->bc, &alu);
        if (r)
            return r;
    }
    return 0;
}

/*
 * Special export handling in shaders
 *
 * shader export ARRAY_BASE for EXPORT_POS:
 * 60 is position
 * 61 is misc vector
 * 62, 63 are clip distance vectors
 *
 * The use of the values exported in 61-63 is controlled by PA_CL_VS_OUT_CNTL:
 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
 * exclusive from render target index)
 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
 *
 *
 * shader export ARRAY_BASE for EXPORT_PIXEL:
 * 0-7 CB targets
 * 61 computed Z vector
 *
 * The use of the values exported in the computed Z vector is controlled
 * by DB_SHADER_CONTROL:
 * Z_EXPORT_ENABLE - Z as a float in RED
 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
 * DB_SOURCE_FORMAT - export control restrictions
 *
 */


/* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
static int r600_spi_sid(struct r600_shader_io * io)
{
    int index, name = io->name;

    /* These params are handled differently, they don't need
     * semantic indices, so we'll use 0 for them.
     */
    if (name == TGSI_SEMANTIC_POSITION ||
        name == TGSI_SEMANTIC_PSIZE ||
        name == TGSI_SEMANTIC_EDGEFLAG ||
        name == TGSI_SEMANTIC_FACE ||
        name == TGSI_SEMANTIC_SAMPLEMASK)
        index = 0;
    else {
        if (name == TGSI_SEMANTIC_GENERIC) {
            /* For generic params simply use sid from tgsi */
            index = 9 + io->sid;
        } else if (name == TGSI_SEMANTIC_TEXCOORD) {
            index = io->sid;
        } else {
            /* For non-generic params - pack name and sid into 8 bits */
            index = 0x80 | (name<<3) | (io->sid);
        }

        /* Make sure that all really used indices have nonzero value, so
         * we can just compare it to 0 later instead of comparing the name
         * with different values to detect special cases. */
        index++;
    }

    return index;
};
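
/* Worked examples (illustrative; concrete TGSI_SEMANTIC_* enum values are an
 * assumption, see p_shader_tokens.h):
 *   TEXCOORD sid 2 -> 2,      then +1 -> 3
 *   GENERIC  sid 3 -> 9 + 3,  then +1 -> 13
 *   packed name 10, sid 1 -> (0x80 | (10 << 3) | 1) + 1 = 0xD2
 * so TEXCOORD, GENERIC and packed non-generic names land in distinct ranges
 * of the 8-bit SPI semantic id, and 0 stays reserved for the specially
 * handled params above. */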

/* we need this to get a common lds index for vs/tcs/tes input/outputs */
int r600_get_lds_unique_index(unsigned semantic_name, unsigned index)
{
    switch (semantic_name) {
    case TGSI_SEMANTIC_POSITION:
        return 0;
    case TGSI_SEMANTIC_PSIZE:
        return 1;
    case TGSI_SEMANTIC_CLIPDIST:
        assert(index <= 1);
        return 2 + index;
    case TGSI_SEMANTIC_TEXCOORD:
        return 4 + index;
    case TGSI_SEMANTIC_GENERIC:
        if (index <= 63-4)
            return 4 + index;
        else
            /* same explanation as in the default statement,
             * the only user hitting this is st/nine.
             */
            return 0;

    /* patch indices are completely separate and thus start from 0 */
    case TGSI_SEMANTIC_TESSOUTER:
        return 0;
    case TGSI_SEMANTIC_TESSINNER:
        return 1;
    case TGSI_SEMANTIC_PATCH:
        return 2 + index;

    default:
        /* Don't fail here. The result of this function is only used
         * for LS, TCS, TES, and GS, where legacy GL semantics can't
         * occur, but this function is called for all vertex shaders
         * before it's known whether LS will be compiled or not.
         */
        return 0;
    }
}
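
/* Worked examples (illustrative): POSITION -> 0, PSIZE -> 1,
 * CLIPDIST[0..1] -> 2..3, and TEXCOORD[n] and GENERIC[n] both map to 4 + n
 * (they share the slot space; a given shader linked through these paths is
 * assumed to use one scheme or the other).  Patch-constant semantics count
 * separately: TESSOUTER -> 0, TESSINNER -> 1, PATCH[n] -> 2 + n.  Each
 * unique index corresponds to one 16-byte vec4 slot in LDS, hence the
 * `param * 16` address arithmetic used below. */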

/* turn input into interpolate on EG */
static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
{
    int r = 0;

    if (ctx->shader->input[index].spi_sid) {
        ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
        if (ctx->shader->input[index].interpolate > 0) {
            evergreen_interp_assign_ij_index(ctx, index);
            r = evergreen_interp_alu(ctx, index);
        } else {
            r = evergreen_interp_flat(ctx, index);
        }
    }
    return r;
}

static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
{
    struct r600_bytecode_alu alu;
    int i, r;
    int gpr_front = ctx->shader->input[front].gpr;
    int gpr_back = ctx->shader->input[back].gpr;

    for (i = 0; i < 4; i++) {
        memset(&alu, 0, sizeof(alu));
        alu.op = ALU_OP3_CNDGT;
        alu.is_op3 = 1;
        alu.dst.write = 1;
        alu.dst.sel = gpr_front;
        alu.src[0].sel = ctx->face_gpr;
        alu.src[1].sel = gpr_front;
        alu.src[2].sel = gpr_back;

        alu.dst.chan = i;
        alu.src[1].chan = i;
        alu.src[2].chan = i;
        alu.last = (i==3);

        if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
            return r;
    }

    return 0;
}

/* execute a single slot ALU calculation */
static int single_alu_op2(struct r600_shader_ctx *ctx, int op,
                          int dst_sel, int dst_chan,
                          int src0_sel, unsigned src0_chan_val,
                          int src1_sel, unsigned src1_chan_val)
{
    struct r600_bytecode_alu alu;
    int r, i;

    if (ctx->bc->chip_class == CAYMAN && op == ALU_OP2_MULLO_INT) {
        for (i = 0; i < 4; i++) {
            memset(&alu, 0, sizeof(struct r600_bytecode_alu));
            alu.op = op;
            alu.src[0].sel = src0_sel;
            if (src0_sel == V_SQ_ALU_SRC_LITERAL)
                alu.src[0].value = src0_chan_val;
            else
                alu.src[0].chan = src0_chan_val;
            alu.src[1].sel = src1_sel;
            if (src1_sel == V_SQ_ALU_SRC_LITERAL)
                alu.src[1].value = src1_chan_val;
            else
                alu.src[1].chan = src1_chan_val;
            alu.dst.sel = dst_sel;
            alu.dst.chan = i;
            alu.dst.write = i == dst_chan;
            alu.last = (i == 3);
            r = r600_bytecode_add_alu(ctx->bc, &alu);
            if (r)
                return r;
        }
        return 0;
    }

    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = op;
    alu.src[0].sel = src0_sel;
    if (src0_sel == V_SQ_ALU_SRC_LITERAL)
        alu.src[0].value = src0_chan_val;
    else
        alu.src[0].chan = src0_chan_val;
    alu.src[1].sel = src1_sel;
    if (src1_sel == V_SQ_ALU_SRC_LITERAL)
        alu.src[1].value = src1_chan_val;
    else
        alu.src[1].chan = src1_chan_val;
    alu.dst.sel = dst_sel;
    alu.dst.chan = dst_chan;
    alu.dst.write = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;
    return 0;
}
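
/* Usage sketch (illustrative): add the literal 16 to temp_reg.x in place,
 * the pattern used repeatedly below for address arithmetic:
 *
 *   r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
 *                      temp_reg, 0,               // dst:  temp_reg.x
 *                      temp_reg, 0,               // src0: temp_reg.x
 *                      V_SQ_ALU_SRC_LITERAL, 16); // src1: literal 16
 */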

/* execute a single slot ALU calculation */
static int single_alu_op3(struct r600_shader_ctx *ctx, int op,
                          int dst_sel, int dst_chan,
                          int src0_sel, unsigned src0_chan_val,
                          int src1_sel, unsigned src1_chan_val,
                          int src2_sel, unsigned src2_chan_val)
{
    struct r600_bytecode_alu alu;
    int r;

    /* validate this for other ops */
    assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT || op == ALU_OP3_BFE_UINT);
    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = op;
    alu.src[0].sel = src0_sel;
    if (src0_sel == V_SQ_ALU_SRC_LITERAL)
        alu.src[0].value = src0_chan_val;
    else
        alu.src[0].chan = src0_chan_val;
    alu.src[1].sel = src1_sel;
    if (src1_sel == V_SQ_ALU_SRC_LITERAL)
        alu.src[1].value = src1_chan_val;
    else
        alu.src[1].chan = src1_chan_val;
    alu.src[2].sel = src2_sel;
    if (src2_sel == V_SQ_ALU_SRC_LITERAL)
        alu.src[2].value = src2_chan_val;
    else
        alu.src[2].chan = src2_chan_val;
    alu.dst.sel = dst_sel;
    alu.dst.chan = dst_chan;
    alu.is_op3 = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;
    return 0;
}

/* put it in temp_reg.x */
static int get_lds_offset0(struct r600_shader_ctx *ctx,
                           int rel_patch_chan,
                           int temp_reg, bool is_patch_var)
{
    int r;

    /* MUL temp.x, patch_stride (input_vals.x), rel_patch_id (r0.y (tcs)) */
    /* ADD
       Dimension - patch0_offset (input_vals.z),
       Non-dim - patch0_data_offset (input_vals.w)
    */
    r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
                       temp_reg, 0,
                       ctx->tess_output_info, 0,
                       0, rel_patch_chan,
                       ctx->tess_output_info, is_patch_var ? 3 : 2);
    if (r)
        return r;
    return 0;
}
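
/* Worked example (illustrative numbers, assuming LDS offsets are in bytes):
 * with a patch stride of 0x90 in tess_output_info.x, rel_patch_id == 2 in
 * the selected channel, and a patch0 offset of 0x400 in tess_output_info.z,
 * the MULADD_UINT24 computes
 *   temp_reg.x = 0x90 * 2 + 0x400 = 0x520
 * i.e. the LDS offset where this patch's data begins. */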

static inline int get_address_file_reg(struct r600_shader_ctx *ctx, int index)
{
    return index > 0 ? ctx->bc->index_reg[index - 1] : ctx->bc->ar_reg;
}

static int r600_get_temp(struct r600_shader_ctx *ctx)
{
    return ctx->temp_reg + ctx->max_driver_temp_used++;
}

static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
{
    int i;
    i = ctx->shader->noutput++;
    ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
    ctx->shader->output[i].sid = 0;
    ctx->shader->output[i].gpr = 0;
    ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
    ctx->shader->output[i].write_mask = 0x4;
    ctx->shader->output[i].spi_sid = prim_id_sid;

    return 0;
}

static int tgsi_barrier(struct r600_shader_ctx *ctx)
{
    struct r600_bytecode_alu alu;
    int r;

    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = ctx->inst_info->op;
    alu.last = 1;

    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;
    return 0;
}

static void choose_spill_arrays(struct r600_shader_ctx *ctx, int *regno, unsigned *scratch_space_needed)
{
    // pick largest array and spill it, repeat until the number of temps is under limit or we run out of arrays
    unsigned n = ctx->info.array_max[TGSI_FILE_TEMPORARY];
    unsigned narrays_left = n;
    bool *spilled = ctx->spilled_arrays; // assumed calloc:ed

    *scratch_space_needed = 0;
    while (*regno > 124 && narrays_left) {
        unsigned i;
        unsigned largest = 0;
        unsigned largest_index = 0;

        for (i = 0; i < n; i++) {
            unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
            if (!spilled[i] && size > largest) {
                largest = size;
                largest_index = i;
            }
        }

        spilled[largest_index] = true;
        *regno -= largest;
        *scratch_space_needed += largest;

        narrays_left--;
    }

    if (narrays_left == 0) {
        ctx->info.indirect_files &= ~(1 << TGSI_FILE_TEMPORARY);
    }
}
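
/* Worked example (illustrative): with *regno == 140 and three temp arrays
 * of sizes 20, 8 and 6, the greedy loop spills the size-20 array first
 * (140 -> 120, which is <= 124, with scratch_space_needed == 20 vec4 slots)
 * and then stops; the two smaller arrays stay in GPRs.  Only if every array
 * ends up spilled is the TEMPORARY indirect-file bit cleared. */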

/* Take spilled temp arrays into account when translating tgsi register
 * indexes into r600 gprs if spilled is false, or scratch array offset if
 * spilled is true */
static int map_tgsi_reg_index_to_r600_gpr(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index, bool *spilled)
{
    unsigned i;
    unsigned spilled_size = 0;

    for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
        if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
            if (ctx->spilled_arrays[i]) {
                /* vec4 index into spilled scratch memory */
                *spilled = true;
                return tgsi_reg_index - ctx->array_infos[i].range.First + spilled_size;
            }
            else {
                /* regular GPR array */
                *spilled = false;
                return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
            }
        }

        if (tgsi_reg_index < ctx->array_infos[i].range.First)
            break;
        if (ctx->spilled_arrays[i]) {
            spilled_size += ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
        }
    }

    /* regular GPR index, minus the holes from spilled arrays */
    *spilled = false;

    return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
}
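
/* Worked example (illustrative): say array 0 covers TEMP[10..29] and is
 * spilled, and file_offset[TGSI_FILE_TEMPORARY] == 4.  Then
 *   TEMP[12] -> spilled == true,  scratch vec4 offset 12 - 10 + 0 = 2
 *   TEMP[35] -> spilled == false, GPR 35 - 20 + 4 = 19
 * i.e. the 20 spilled registers no longer occupy GPR space, so later temps
 * shift down to fill the hole. */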

/* look up spill area base offset and array size for a spilled temp array */
static void get_spilled_array_base_and_size(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index,
                                            unsigned *array_base, unsigned *array_size)
{
    unsigned i;
    unsigned offset = 0;

    for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
        if (ctx->spilled_arrays[i]) {
            unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;

            if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
                *array_base = offset;
                *array_size = size - 1; /* hw counts from 1 */

                return;
            }

            offset += size;
        }
    }
}

static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
    struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
    int r, i, j, count = d->Range.Last - d->Range.First + 1;

    switch (d->Declaration.File) {
    case TGSI_FILE_INPUT:
        for (j = 0; j < count; j++) {
            i = ctx->shader->ninput + j;
            assert(i < ARRAY_SIZE(ctx->shader->input));
            ctx->shader->input[i].name = d->Semantic.Name;
            ctx->shader->input[i].sid = d->Semantic.Index + j;
            ctx->shader->input[i].interpolate = d->Interp.Interpolate;
            ctx->shader->input[i].interpolate_location = d->Interp.Location;
            ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
            if (ctx->type == PIPE_SHADER_FRAGMENT) {
                ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
                switch (ctx->shader->input[i].name) {
                case TGSI_SEMANTIC_FACE:
                    if (ctx->face_gpr != -1)
                        ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
                    else
                        ctx->face_gpr = ctx->shader->input[i].gpr;
                    break;
                case TGSI_SEMANTIC_COLOR:
                    ctx->colors_used++;
                    break;
                case TGSI_SEMANTIC_POSITION:
                    ctx->fragcoord_input = i;
                    break;
                case TGSI_SEMANTIC_PRIMID:
                    /* set this for now */
                    ctx->shader->gs_prim_id_input = true;
                    ctx->shader->ps_prim_id_input = i;
                    break;
                }
                if (ctx->bc->chip_class >= EVERGREEN) {
                    if ((r = evergreen_interp_input(ctx, i)))
                        return r;
                }
            } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
                /* FIXME probably skip inputs if they aren't passed in the ring */
                ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
                ctx->next_ring_offset += 16;
                if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
                    ctx->shader->gs_prim_id_input = true;
            }
        }
        ctx->shader->ninput += count;
        break;
    case TGSI_FILE_OUTPUT:
        for (j = 0; j < count; j++) {
            i = ctx->shader->noutput + j;
            assert(i < ARRAY_SIZE(ctx->shader->output));
            ctx->shader->output[i].name = d->Semantic.Name;
            ctx->shader->output[i].sid = d->Semantic.Index + j;
            ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
            ctx->shader->output[i].interpolate = d->Interp.Interpolate;
            ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
            if (ctx->type == PIPE_SHADER_VERTEX ||
                ctx->type == PIPE_SHADER_GEOMETRY ||
                ctx->type == PIPE_SHADER_TESS_EVAL) {
                ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
                switch (d->Semantic.Name) {
                case TGSI_SEMANTIC_CLIPDIST:
                    break;
                case TGSI_SEMANTIC_PSIZE:
                    ctx->shader->vs_out_misc_write = 1;
                    ctx->shader->vs_out_point_size = 1;
                    break;
                case TGSI_SEMANTIC_EDGEFLAG:
                    ctx->shader->vs_out_misc_write = 1;
                    ctx->shader->vs_out_edgeflag = 1;
                    ctx->edgeflag_output = i;
                    break;
                case TGSI_SEMANTIC_VIEWPORT_INDEX:
                    ctx->shader->vs_out_misc_write = 1;
                    ctx->shader->vs_out_viewport = 1;
                    break;
                case TGSI_SEMANTIC_LAYER:
                    ctx->shader->vs_out_misc_write = 1;
                    ctx->shader->vs_out_layer = 1;
                    break;
                case TGSI_SEMANTIC_CLIPVERTEX:
                    ctx->clip_vertex_write = TRUE;
                    ctx->cv_output = i;
                    break;
                }
                if (ctx->type == PIPE_SHADER_GEOMETRY) {
                    ctx->gs_out_ring_offset += 16;
                }
            } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
                switch (d->Semantic.Name) {
                case TGSI_SEMANTIC_COLOR:
                    ctx->shader->nr_ps_max_color_exports++;
                    break;
                }
            }
        }
        ctx->shader->noutput += count;
        break;
    case TGSI_FILE_TEMPORARY:
        if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
            if (d->Array.ArrayID) {
                bool spilled;
                unsigned idx = map_tgsi_reg_index_to_r600_gpr(ctx,
                                                              d->Range.First,
                                                              &spilled);

                if (!spilled) {
                    r600_add_gpr_array(ctx->shader, idx,
                                       d->Range.Last - d->Range.First + 1, 0x0F);
                }
            }
        }
        break;

    case TGSI_FILE_CONSTANT:
    case TGSI_FILE_SAMPLER:
    case TGSI_FILE_SAMPLER_VIEW:
    case TGSI_FILE_ADDRESS:
    case TGSI_FILE_BUFFER:
    case TGSI_FILE_IMAGE:
    case TGSI_FILE_MEMORY:
        break;

    case TGSI_FILE_HW_ATOMIC:
        i = ctx->shader->nhwatomic_ranges;
        ctx->shader->atomics[i].start = d->Range.First;
        ctx->shader->atomics[i].end = d->Range.Last;
        ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + ctx->shader->nhwatomic;
        ctx->shader->atomics[i].array_id = d->Array.ArrayID;
        ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
        ctx->shader->nhwatomic_ranges++;
        ctx->shader->nhwatomic += count;
        break;

    case TGSI_FILE_SYSTEM_VALUE:
        if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
            d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
            d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
            break; /* Already handled from allocate_system_value_inputs */
        } else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
            break;
        } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
            break;
        else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
            break;
        else if (d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ||
                 d->Semantic.Name == TGSI_SEMANTIC_TESSOUTER) {
            int param = r600_get_lds_unique_index(d->Semantic.Name, 0);
            int dreg = d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ? 3 : 2;
            unsigned temp_reg = r600_get_temp(ctx);

            r = get_lds_offset0(ctx, 2, temp_reg, true);
            if (r)
                return r;

            r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
                               temp_reg, 0,
                               temp_reg, 0,
                               V_SQ_ALU_SRC_LITERAL, param * 16);
            if (r)
                return r;

            do_lds_fetch_values(ctx, temp_reg, dreg, 0xf);
        }
        else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
            /* MOV r1.x, r0.x;
               MOV r1.y, r0.y;
            */
            for (i = 0; i < 2; i++) {
                struct r600_bytecode_alu alu;
                memset(&alu, 0, sizeof(struct r600_bytecode_alu));
                alu.op = ALU_OP1_MOV;
                alu.src[0].sel = 0;
                alu.src[0].chan = 0 + i;
                alu.dst.sel = 1;
                alu.dst.chan = 0 + i;
                alu.dst.write = 1;
                alu.last = (i == 1) ? 1 : 0;
                if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
                    return r;
            }
            /* ADD r1.z, 1.0f, -r0.x */
            struct r600_bytecode_alu alu;
            memset(&alu, 0, sizeof(struct r600_bytecode_alu));
            alu.op = ALU_OP2_ADD;
            alu.src[0].sel = V_SQ_ALU_SRC_1;
            alu.src[1].sel = 1;
            alu.src[1].chan = 0;
            alu.src[1].neg = 1;
            alu.dst.sel = 1;
            alu.dst.chan = 2;
            alu.dst.write = 1;
            alu.last = 1;
            if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
                return r;

            /* ADD r1.z, r1.z, -r1.y */
            alu.op = ALU_OP2_ADD;
            alu.src[0].sel = 1;
            alu.src[0].chan = 2;
            alu.src[1].sel = 1;
            alu.src[1].chan = 1;
            alu.src[1].neg = 1;
            alu.dst.sel = 1;
            alu.dst.chan = 2;
            alu.dst.write = 1;
            alu.last = 1;
            if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
                return r;
            break;
        }
        break;
    default:
        R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
        return -EINVAL;
    }
    return 0;
}

static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
{
    struct tgsi_parse_context parse;
    struct {
        boolean enabled;
        int *reg;
        unsigned name, alternate_name;
    } inputs[2] = {
        { false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */

        { false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
    };
    int num_regs = 0;
    unsigned k, i;

    if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
        return 0;
    }

    /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
    while (!tgsi_parse_end_of_tokens(&parse)) {
        tgsi_parse_token(&parse);

        if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
            const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
            if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
                inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
                inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
            {
                int interpolate, location, k;

                if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
                    location = TGSI_INTERPOLATE_LOC_CENTER;
                } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
                    location = TGSI_INTERPOLATE_LOC_CENTER;
                    /* Needs sample positions, currently those are always available */
                } else {
                    location = TGSI_INTERPOLATE_LOC_CENTROID;
                }

                interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
                k = eg_get_interpolator_index(interpolate, location);
                if (k >= 0)
                    ctx->eg_interpolators[k].enabled = true;
            }
        } else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
            struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
            if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
                for (k = 0; k < ARRAY_SIZE(inputs); k++) {
                    if (d->Semantic.Name == inputs[k].name ||
                        d->Semantic.Name == inputs[k].alternate_name) {
                        inputs[k].enabled = true;
                    }
                }
            }
        }
    }

    tgsi_parse_free(&parse);

    if (ctx->info.reads_samplemask &&
        (ctx->info.uses_linear_sample || ctx->info.uses_persp_sample)) {
        inputs[1].enabled = true;
    }

    if (ctx->bc->chip_class >= EVERGREEN) {
        int num_baryc = 0;
        /* assign gpr to each interpolator according to priority */
        for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
            if (ctx->eg_interpolators[i].enabled) {
                ctx->eg_interpolators[i].ij_index = num_baryc;
                num_baryc++;
            }
        }
        num_baryc = (num_baryc + 1) >> 1;
        gpr_offset += num_baryc;
    }

    for (i = 0; i < ARRAY_SIZE(inputs); i++) {
        boolean enabled = inputs[i].enabled;
        int *reg = inputs[i].reg;
        unsigned name = inputs[i].name;

        if (enabled) {
            int gpr = gpr_offset + num_regs++;
            ctx->shader->nsys_inputs++;

            // add to inputs, allocate a gpr
            k = ctx->shader->ninput++;
            ctx->shader->input[k].name = name;
            ctx->shader->input[k].sid = 0;
            ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
            ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
            *reg = ctx->shader->input[k].gpr = gpr;
        }
    }

    return gpr_offset + num_regs;
}
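
/* GPR layout sketch (illustrative): two ij pairs pack into one GPR, so with
 * three enabled interpolators and both system-value inputs live on an
 * Evergreen part, the start of the register file looks like
 *   gpr 0..1 : barycentric ij pairs   ((3 + 1) >> 1 == 2 GPRs)
 *   gpr 2    : face/samplemask GPR    (face_gpr)
 *   gpr 3    : fixed point position   (fixed_pt_position_gpr, sample id in .w)
 * and the function returns 4 as the next free GPR. */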

/*
 * for evergreen we need to scan the shader to find the number of GPRs we need to
 * reserve for interpolation and system values
 *
 * we need to know if we are going to emit any sample or centroid inputs
 * if perspective and linear are required
 */
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
    unsigned i;

    memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));

    /*
     * Could get this information from the shader info. But right now
     * we interpolate all declared inputs, whereas the shader info will
     * only contain the bits if the inputs are actually used, so it might
     * not be safe...
     */
    for (i = 0; i < ctx->info.num_inputs; i++) {
        int k;
        /* skip position/face/mask/sampleid */
        if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
            ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
            ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
            ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
            continue;

        k = eg_get_interpolator_index(
            ctx->info.input_interpolate[i],
            ctx->info.input_interpolate_loc[i]);
        if (k >= 0)
            ctx->eg_interpolators[k].enabled = TRUE;
    }

    /* XXX PULL MODEL and LINE STIPPLE */

    return allocate_system_value_inputs(ctx, 0);
}

/* sample_id == NULL means fetch for current sample */
static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
{
    struct r600_bytecode_vtx vtx;
    int r, t1;

    t1 = r600_get_temp(ctx);

    memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
    vtx.op = FETCH_OP_VFETCH;
    vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
    vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
    if (sample_id == NULL) {
        assert(ctx->fixed_pt_position_gpr != -1);

        vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
        vtx.src_sel_x = 3;
    }
    else {
        struct r600_bytecode_alu alu;

        memset(&alu, 0, sizeof(struct r600_bytecode_alu));
        alu.op = ALU_OP1_MOV;
        r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
        alu.dst.sel = t1;
        alu.dst.write = 1;
        alu.last = 1;
        r = r600_bytecode_add_alu(ctx->bc, &alu);
        if (r)
            return r;

        vtx.src_gpr = t1;
        vtx.src_sel_x = 0;
    }
    vtx.mega_fetch_count = 16;
    vtx.dst_gpr = t1;
    vtx.dst_sel_x = 0;
    vtx.dst_sel_y = 1;
    vtx.dst_sel_z = 2;
    vtx.dst_sel_w = 3;
    vtx.data_format = FMT_32_32_32_32_FLOAT;
    vtx.num_format_all = 2;
    vtx.format_comp_all = 1;
    vtx.use_const_fields = 0;
    vtx.offset = 0;
    vtx.endian = r600_endian_swap(32);
    vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */

    r = r600_bytecode_add_vtx(ctx->bc, &vtx);
    if (r)
        return r;

    return t1;
}

static int eg_load_helper_invocation(struct r600_shader_ctx *ctx)
{
    int r;
    struct r600_bytecode_alu alu;

    /* do a vtx fetch with wqm set on the vtx fetch */
    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = ALU_OP1_MOV;
    alu.dst.sel = ctx->helper_invoc_reg;
    alu.dst.chan = 0;
    alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
    alu.src[0].value = 0xffffffff;
    alu.dst.write = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;

    /* do a vtx fetch in VPM mode */
    struct r600_bytecode_vtx vtx;
    memset(&vtx, 0, sizeof(vtx));
    vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
    vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
    vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
    vtx.src_gpr = 0;
    vtx.mega_fetch_count = 16; /* no idea here really... */
    vtx.dst_gpr = ctx->helper_invoc_reg;
    vtx.dst_sel_x = 4; /* SEL_0 */
    vtx.dst_sel_y = 7; /* SEL_MASK */
    vtx.dst_sel_z = 7; /* SEL_MASK */
    vtx.dst_sel_w = 7; /* SEL_MASK */
    vtx.data_format = FMT_32;
    if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
        return r;
    ctx->bc->cf_last->vpm = 1;
    return 0;
}

static int cm_load_helper_invocation(struct r600_shader_ctx *ctx)
{
    int r;
    struct r600_bytecode_alu alu;

    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = ALU_OP1_MOV;
    alu.dst.sel = ctx->helper_invoc_reg;
    alu.dst.chan = 0;
    alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
    alu.src[0].value = 0xffffffff;
    alu.dst.write = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;

    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = ALU_OP1_MOV;
    alu.dst.sel = ctx->helper_invoc_reg;
    alu.dst.chan = 0;
    alu.src[0].sel = V_SQ_ALU_SRC_0;
    alu.dst.write = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_VALID_PIXEL_MODE);
    if (r)
        return r;

    return ctx->helper_invoc_reg;
}

static int load_block_grid_size(struct r600_shader_ctx *ctx, bool load_block)
{
    struct r600_bytecode_vtx vtx;
    int r, t1;

    if (load_block) {
        if (ctx->cs_block_size_loaded)
            return ctx->cs_block_size_reg;
    } else {
        if (ctx->cs_grid_size_loaded)
            return ctx->cs_grid_size_reg;
    }

    t1 = load_block ? ctx->cs_block_size_reg : ctx->cs_grid_size_reg;
    struct r600_bytecode_alu alu;
    memset(&alu, 0, sizeof(struct r600_bytecode_alu));
    alu.op = ALU_OP1_MOV;
    alu.src[0].sel = V_SQ_ALU_SRC_0;
    alu.dst.sel = t1;
    alu.dst.write = 1;
    alu.last = 1;
    r = r600_bytecode_add_alu(ctx->bc, &alu);
    if (r)
        return r;

    memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
    vtx.op = FETCH_OP_VFETCH;
    vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
    vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
    vtx.src_gpr = t1;
    vtx.src_sel_x = 0;

    vtx.mega_fetch_count = 16;
    vtx.dst_gpr = t1;
    vtx.dst_sel_x = 0;
    vtx.dst_sel_y = 1;
    vtx.dst_sel_z = 2;
    vtx.dst_sel_w = 7;
    vtx.data_format = FMT_32_32_32_32;
    vtx.num_format_all = 1;
    vtx.format_comp_all = 0;
    vtx.use_const_fields = 0;
    vtx.offset = load_block ? 0 : 16; // first element is size of buffer
    vtx.endian = r600_endian_swap(32);
    vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */

    r = r600_bytecode_add_vtx(ctx->bc, &vtx);
    if (r)
        return r;

    if (load_block)
        ctx->cs_block_size_loaded = true;
    else
        ctx->cs_grid_size_loaded = true;
    return t1;
}
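
/* Const-buffer layout sketch (illustrative, derived from the offsets above):
 * the R600_BUFFER_INFO_CONST_BUFFER slot used here carries the compute block
 * size in the 16-byte element at byte offset 0 and the grid size in the
 * element at byte offset 16, so load_block selects between the two slots;
 * only .xyz are fetched and .w is masked off (dst_sel_w = 7). */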

static void tgsi_src(struct r600_shader_ctx *ctx,
                     const struct tgsi_full_src_register *tgsi_src,
                     struct r600_shader_src *r600_src)
{
    memset(r600_src, 0, sizeof(*r600_src));
    r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
    r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
    r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
    r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
    r600_src->neg = tgsi_src->Register.Negate;
    r600_src->abs = tgsi_src->Register.Absolute;

    if (tgsi_src->Register.File == TGSI_FILE_TEMPORARY) {
        bool spilled;
        unsigned idx;

        idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_src->Register.Index, &spilled);

        if (spilled) {
            int reg = r600_get_temp(ctx);
            int r;

            r600_src->sel = reg;

            if (ctx->bc->chip_class < R700) {
                struct r600_bytecode_output cf;

                memset(&cf, 0, sizeof(struct r600_bytecode_output));
                cf.op = CF_OP_MEM_SCRATCH;
                cf.elem_size = 3;
                cf.gpr = reg;
                cf.comp_mask = 0xF;
                cf.swizzle_x = 0;
                cf.swizzle_y = 1;
                cf.swizzle_z = 2;
                cf.swizzle_w = 3;
                cf.burst_count = 1;

                get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
                                                &cf.array_base, &cf.array_size);

                if (tgsi_src->Register.Indirect) {
                    cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
                    cf.index_gpr = ctx->bc->ar_reg;
                }
                else {
                    cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ;
                    cf.array_base += idx;
                    cf.array_size = 0;
                }

                r = r600_bytecode_add_output(ctx->bc, &cf);
            }
            else {
                struct r600_bytecode_vtx vtx;

                if (r600_bytecode_get_need_wait_ack(ctx->bc)) {
                    r600_bytecode_need_wait_ack(ctx->bc, false);
                    r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
                }

                memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
                vtx.op = FETCH_OP_READ_SCRATCH;
                vtx.dst_gpr = reg;
                vtx.uncached = 1; // Must bypass cache since prior spill written in same invocation
                vtx.elem_size = 3;
                vtx.data_format = FMT_32_32_32_32;
                vtx.num_format_all = V_038010_SQ_NUM_FORMAT_INT;
                vtx.dst_sel_x = tgsi_src->Register.SwizzleX;
                vtx.dst_sel_y = tgsi_src->Register.SwizzleY;
                vtx.dst_sel_z = tgsi_src->Register.SwizzleZ;
                vtx.dst_sel_w = tgsi_src->Register.SwizzleW;

                get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
                                                &vtx.array_base, &vtx.array_size);

                if (tgsi_src->Register.Indirect) {
                    vtx.indexed = 1;
                    vtx.src_gpr = ctx->bc->ar_reg;
                }
                else {
                    vtx.array_base += idx;
                    vtx.array_size = 0;
                }

                r = r600_bytecode_add_vtx(ctx->bc, &vtx);
            }

            if (r)
                return;
        }
        else {
            if (tgsi_src->Register.Indirect)
                r600_src->rel = V_SQ_REL_RELATIVE;

            r600_src->sel = idx;
        }

        return;
    }

    if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
        int index;
        if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
            (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
            (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {

            index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
            r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg, r600_src->abs);
            if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
                return;
        }
        index = tgsi_src->Register.Index;
        r600_src->sel = V_SQ_ALU_SRC_LITERAL;
        memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
    } else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
        if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
            r600_src->swizzle[0] = 2; // Z value
            r600_src->swizzle[1] = 2;
            r600_src->swizzle[2] = 2;
            r600_src->swizzle[3] = 2;
            r600_src->sel = ctx->face_gpr;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
            r600_src->swizzle[0] = 3; // W value
            r600_src->swizzle[1] = 3;
            r600_src->swizzle[2] = 3;
            r600_src->swizzle[3] = 3;
            r600_src->sel = ctx->fixed_pt_position_gpr;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
            r600_src->swizzle[0] = 0;
            r600_src->swizzle[1] = 1;
            r600_src->swizzle[2] = 4;
            r600_src->swizzle[3] = 4;
            r600_src->sel = load_sample_position(ctx, NULL, -1);
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
            r600_src->swizzle[0] = 3;
            r600_src->swizzle[1] = 3;
            r600_src->swizzle[2] = 3;
            r600_src->swizzle[3] = 3;
            r600_src->sel = 0;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
            r600_src->swizzle[0] = 0;
            r600_src->swizzle[1] = 0;
            r600_src->swizzle[2] = 0;
            r600_src->swizzle[3] = 0;
            r600_src->sel = 0;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_THREAD_ID) {
            r600_src->sel = 0;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_ID) {
            r600_src->sel = 1;
        } else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
            r600_src->swizzle[0] = 3;
            r600_src->swizzle[1] = 3;
            r600_src->swizzle[2] = 3;
            r600_src->swizzle[3] = 3;
            r600_src->sel = 1;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
            r600_src->swizzle[0] = 2;
            r600_src->swizzle[1] = 2;
            r600_src->swizzle[2] = 2;
            r600_src->swizzle[3] = 2;
            r600_src->sel = 0;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSCOORD) {
            r600_src->sel = 1;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSINNER) {
            r600_src->sel = 3;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSOUTER) {
            r600_src->sel = 2;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTICESIN) {
            r600_src->sel = ctx->tess_input_info;
            r600_src->swizzle[0] = 2;
            r600_src->swizzle[1] = 2;
            r600_src->swizzle[2] = 2;
            r600_src->swizzle[3] = 2;
        } else if (ctx->type == PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
            r600_src->sel = 0;
            r600_src->swizzle[0] = 0;
            r600_src->swizzle[1] = 0;
            r600_src->swizzle[2] = 0;
            r600_src->swizzle[3] = 0;
        } else if (ctx->type == PIPE_SHADER_TESS_EVAL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
            r600_src->sel = 0;
            r600_src->swizzle[0] = 3;
            r600_src->swizzle[1] = 3;
            r600_src->swizzle[2] = 3;
            r600_src->swizzle[3] = 3;
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_GRID_SIZE) {
            r600_src->sel = load_block_grid_size(ctx, false);
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_SIZE) {
            r600_src->sel = load_block_grid_size(ctx, true);
        } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_HELPER_INVOCATION) {
            r600_src->sel = ctx->helper_invoc_reg;
            r600_src->swizzle[0] = 0;
            r600_src->swizzle[1] = 0;
            r600_src->swizzle[2] = 0;
            r600_src->swizzle[3] = 0;
        }
    } else {
        if (tgsi_src->Register.Indirect)
            r600_src->rel = V_SQ_REL_RELATIVE;
        r600_src->sel = tgsi_src->Register.Index;
        r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
    }
    if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
        if (tgsi_src->Register.Dimension) {
            r600_src->kc_bank = tgsi_src->Dimension.Index;
            if (tgsi_src->Dimension.Indirect) {
                r600_src->kc_rel = 1;
            }
        }
    }
}
1794
1795 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
1796 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
1797 unsigned int dst_reg)
1798 {
1799 struct r600_bytecode_vtx vtx;
1800 unsigned int ar_reg;
1801 int r;
1802
1803 if (offset) {
1804 struct r600_bytecode_alu alu;
1805
1806 memset(&alu, 0, sizeof(alu));
1807
1808 alu.op = ALU_OP2_ADD_INT;
1809 alu.src[0].sel = ctx->bc->ar_reg;
1810 alu.src[0].chan = ar_chan;
1811
1812 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1813 alu.src[1].value = offset;
1814
1815 alu.dst.sel = dst_reg;
1816 alu.dst.chan = ar_chan;
1817 alu.dst.write = 1;
1818 alu.last = 1;
1819
1820 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1821 return r;
1822
1823 ar_reg = dst_reg;
1824 } else {
1825 ar_reg = ctx->bc->ar_reg;
1826 }
1827
1828 memset(&vtx, 0, sizeof(vtx));
1829 vtx.buffer_id = cb_idx;
1830 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1831 vtx.src_gpr = ar_reg;
1832 vtx.src_sel_x = ar_chan;
1833 vtx.mega_fetch_count = 16;
1834 vtx.dst_gpr = dst_reg;
1835 vtx.dst_sel_x = 0; /* SEL_X */
1836 vtx.dst_sel_y = 1; /* SEL_Y */
1837 vtx.dst_sel_z = 2; /* SEL_Z */
1838 vtx.dst_sel_w = 3; /* SEL_W */
1839 vtx.data_format = FMT_32_32_32_32_FLOAT;
1840 vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
1841 vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
1842 vtx.endian = r600_endian_swap(32);
1843 vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;
1844
1845 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1846 return r;
1847
1848 return 0;
1849 }
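/* Illustrative sketch of what the above emits for a TGSI source like
 * CONST[ADDR.x + 5] in constant buffer 0:
 *   ADD_INT  tmp.chan = AR.chan + 5     ; relative constant index
 *   VFETCH   tmp.xyzw = cb0[tmp.chan]   ; pull the whole vec4
 * i.e. relatively addressed constants go through a vertex fetch
 * instead of the kcache. */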
1850
1851 static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1852 {
1853 struct r600_bytecode_vtx vtx;
1854 int r;
1855 unsigned index = src->Register.Index;
1856 unsigned vtx_id = src->Dimension.Index;
1857 int offset_reg = ctx->gs_rotated_input[vtx_id / 3];
1858 int offset_chan = vtx_id % 3;
1859 int t2 = 0;
1860
1861 /* offsets of per-vertex data in the ESGS ring are passed to the GS in
1862 * R0.x, R0.y, R0.w, R1.x, R1.y and R1.z (R0.z carries the PrimitiveID) */
1863
1864 if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2)
1865 offset_chan = 3;
1866
1867 if (src->Dimension.Indirect || src->Register.Indirect)
1868 t2 = r600_get_temp(ctx);
1869
1870 if (src->Dimension.Indirect) {
1871 int treg[3];
1872 struct r600_bytecode_alu alu;
1873 int r, i;
1874 unsigned addr_reg;
1875 addr_reg = get_address_file_reg(ctx, src->DimIndirect.Index);
1876 if (src->DimIndirect.Index > 0) {
1877 r = single_alu_op2(ctx, ALU_OP1_MOV,
1878 ctx->bc->ar_reg, 0,
1879 addr_reg, 0,
1880 0, 0);
1881 if (r)
1882 return r;
1883 }
1884 /*
1885 * We have to copy R0.x/y/w into Rt.x, Rt+1.x and Rt+2.x and then index
1886 * the register range starting at Rt; at least this is what fglrx seems to do. */
1887 for (i = 0; i < 3; i++) {
1888 treg[i] = r600_get_temp(ctx);
1889 }
1890 r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);
1891
1892 for (i = 0; i < 3; i++) {
1893 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1894 alu.op = ALU_OP1_MOV;
1895 alu.src[0].sel = ctx->gs_rotated_input[0];
1896 alu.src[0].chan = i == 2 ? 3 : i;
1897 alu.dst.sel = treg[i];
1898 alu.dst.chan = 0;
1899 alu.dst.write = 1;
1900 alu.last = 1;
1901 r = r600_bytecode_add_alu(ctx->bc, &alu);
1902 if (r)
1903 return r;
1904 }
1905 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1906 alu.op = ALU_OP1_MOV;
1907 alu.src[0].sel = treg[0];
1908 alu.src[0].rel = 1;
1909 alu.dst.sel = t2;
1910 alu.dst.write = 1;
1911 alu.last = 1;
1912 r = r600_bytecode_add_alu(ctx->bc, &alu);
1913 if (r)
1914 return r;
1915 offset_reg = t2;
1916 offset_chan = 0;
1917 }
1918
1919 if (src->Register.Indirect) {
1920 int addr_reg;
1921 unsigned first = ctx->info.input_array_first[src->Indirect.ArrayID];
1922
1923 addr_reg = get_address_file_reg(ctx, src->Indirect.Index);
1924
1925 /* pull the value from index_reg */
1926 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1927 t2, 1,
1928 addr_reg, 0,
1929 V_SQ_ALU_SRC_LITERAL, first);
1930 if (r)
1931 return r;
1932 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1933 t2, 0,
1934 t2, 1,
1935 V_SQ_ALU_SRC_LITERAL, 4,
1936 offset_reg, offset_chan);
1937 if (r)
1938 return r;
1939 offset_reg = t2;
1940 offset_chan = 0;
1941 index = src->Register.Index - first;
1942 }
1943
1944 memset(&vtx, 0, sizeof(vtx));
1945 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1946 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1947 vtx.src_gpr = offset_reg;
1948 vtx.src_sel_x = offset_chan;
1949 vtx.offset = index * 16; /* bytes */
1950 vtx.mega_fetch_count = 16;
1951 vtx.dst_gpr = dst_reg;
1952 vtx.dst_sel_x = 0; /* SEL_X */
1953 vtx.dst_sel_y = 1; /* SEL_Y */
1954 vtx.dst_sel_z = 2; /* SEL_Z */
1955 vtx.dst_sel_w = 3; /* SEL_W */
1956 if (ctx->bc->chip_class >= EVERGREEN) {
1957 vtx.use_const_fields = 1;
1958 } else {
1959 vtx.data_format = FMT_32_32_32_32_FLOAT;
1960 }
1961
1962 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1963 return r;
1964
1965 return 0;
1966 }
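/* Worked example (assuming no strip-adjacency rotation): reading input
 * attribute 2 of vertex 1 uses the per-vertex offset that arrived in
 * R0.y as the fetch index, plus a constant byte offset of 2 * 16 = 32,
 * i.e. the third vec4 of that vertex's slice of the ESGS ring. */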
1967
1968 static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
1969 {
1970 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1971 unsigned i;
1972
1973 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1974 struct tgsi_full_src_register *src = &inst->Src[i];
1975
1976 if (src->Register.File == TGSI_FILE_INPUT) {
1977 if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
1978 /* primitive id is in R0.z */
1979 ctx->src[i].sel = 0;
1980 ctx->src[i].swizzle[0] = 2;
1981 }
1982 }
1983 if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
1984 int treg = r600_get_temp(ctx);
1985
1986 fetch_gs_input(ctx, src, treg);
1987 ctx->src[i].sel = treg;
1988 ctx->src[i].rel = 0;
1989 }
1990 }
1991 return 0;
1992 }
1993
1994
1995 /* Tessellation shaders pass outputs to the next shader using LDS.
1996 *
1997 * LS outputs = TCS(HS) inputs
1998 * TCS(HS) outputs = TES(DS) inputs
1999 *
2000 * The LDS layout is:
2001 * - TCS inputs for patch 0
2002 * - TCS inputs for patch 1
2003 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
2004 * - ...
2005 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
2006 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
2007 * - TCS outputs for patch 1
2008 * - Per-patch TCS outputs for patch 1
2009 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
2010 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
2011 * - ...
2012 *
2013 * All three shaders VS(LS), TCS, TES share the same LDS space.
2014 */
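/* Worked example of the addressing this layout implies: with a vertex
 * stride of S bytes, attribute slot P of vertex V within the current
 * patch lives at
 *   patch_base + V * S + P * 16
 * which is what r600_get_byte_address() below computes: a MULADD of the
 * vertex index with the stride, plus param * 16 for the attribute. */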
2015 /* this will return with the byte address in temp_reg.x */
2016 static int r600_get_byte_address(struct r600_shader_ctx *ctx, int temp_reg,
2017 const struct tgsi_full_dst_register *dst,
2018 const struct tgsi_full_src_register *src,
2019 int stride_bytes_reg, int stride_bytes_chan)
2020 {
2021 struct tgsi_full_dst_register reg;
2022 ubyte *name, *index, *array_first;
2023 int r;
2024 int param;
2025 struct tgsi_shader_info *info = &ctx->info;
2026 /* Set the register description. The address computation is the same
2027 * for sources and destinations. */
2028 if (src) {
2029 reg.Register.File = src->Register.File;
2030 reg.Register.Index = src->Register.Index;
2031 reg.Register.Indirect = src->Register.Indirect;
2032 reg.Register.Dimension = src->Register.Dimension;
2033 reg.Indirect = src->Indirect;
2034 reg.Dimension = src->Dimension;
2035 reg.DimIndirect = src->DimIndirect;
2036 } else
2037 reg = *dst;
2038
2039 /* If the register is 2-dimensional (e.g. an array of vertices
2040 * in a primitive), calculate the base address of the vertex. */
2041 if (reg.Register.Dimension) {
2042 int sel, chan;
2043 if (reg.Dimension.Indirect) {
2044 unsigned addr_reg;
2045 assert (reg.DimIndirect.File == TGSI_FILE_ADDRESS);
2046
2047 addr_reg = get_address_file_reg(ctx, reg.DimIndirect.Index);
2048 /* pull the value from index_reg */
2049 sel = addr_reg;
2050 chan = 0;
2051 } else {
2052 sel = V_SQ_ALU_SRC_LITERAL;
2053 chan = reg.Dimension.Index;
2054 }
2055
2056 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2057 temp_reg, 0,
2058 stride_bytes_reg, stride_bytes_chan,
2059 sel, chan,
2060 temp_reg, 0);
2061 if (r)
2062 return r;
2063 }
2064
2065 if (reg.Register.File == TGSI_FILE_INPUT) {
2066 name = info->input_semantic_name;
2067 index = info->input_semantic_index;
2068 array_first = info->input_array_first;
2069 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
2070 name = info->output_semantic_name;
2071 index = info->output_semantic_index;
2072 array_first = info->output_array_first;
2073 } else {
2074 assert(0);
2075 return -1;
2076 }
2077 if (reg.Register.Indirect) {
2078 int addr_reg;
2079 int first;
2080 /* Add the relative address of the element. */
2081 if (reg.Indirect.ArrayID)
2082 first = array_first[reg.Indirect.ArrayID];
2083 else
2084 first = reg.Register.Index;
2085
2086 addr_reg = get_address_file_reg(ctx, reg.Indirect.Index);
2087
2088 /* pull the value from index_reg */
2089 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2090 temp_reg, 0,
2091 V_SQ_ALU_SRC_LITERAL, 16,
2092 addr_reg, 0,
2093 temp_reg, 0);
2094 if (r)
2095 return r;
2096
2097 param = r600_get_lds_unique_index(name[first],
2098 index[first]);
2099
2100 } else {
2101 param = r600_get_lds_unique_index(name[reg.Register.Index],
2102 index[reg.Register.Index]);
2103 }
2104
2105 /* add to base_addr - passed in temp_reg.x */
2106 if (param) {
2107 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2108 temp_reg, 0,
2109 temp_reg, 0,
2110 V_SQ_ALU_SRC_LITERAL, param * 16);
2111 if (r)
2112 return r;
2113
2114 }
2115 return 0;
2116 }
2117
2118 static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
2119 unsigned dst_reg, unsigned mask)
2120 {
2121 struct r600_bytecode_alu alu;
2122 int r, i, lasti;
2123
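	/* If the current clause is nearly full, force a fresh CF so the reads
	 * below stay together; ndw counts dwords (two per ALU instruction), so
	 * 0x60 appears to leave headroom under the 128-instruction clause limit. */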
2124 if ((ctx->bc->cf_last->ndw>>1) >= 0x60)
2125 ctx->bc->force_add_cf = 1;
2126
2127 lasti = tgsi_last_instruction(mask);
2128 for (i = 1; i <= lasti; i++) {
2129 if (!(mask & (1 << i)))
2130 continue;
2131
2132 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2133 temp_reg, i,
2134 temp_reg, 0,
2135 V_SQ_ALU_SRC_LITERAL, 4 * i);
2136 if (r)
2137 return r;
2138 }
2139 for (i = 0; i <= lasti; i++) {
2140 if (!(mask & (1 << i)))
2141 continue;
2142
2143 /* emit an LDS_READ_RET */
2144 memset(&alu, 0, sizeof(alu));
2145 alu.op = LDS_OP1_LDS_READ_RET;
2146 alu.src[0].sel = temp_reg;
2147 alu.src[0].chan = i;
2148 alu.src[1].sel = V_SQ_ALU_SRC_0;
2149 alu.src[2].sel = V_SQ_ALU_SRC_0;
2150 alu.dst.chan = 0;
2151 alu.is_lds_idx_op = true;
2152 alu.last = 1;
2153 r = r600_bytecode_add_alu(ctx->bc, &alu);
2154 if (r)
2155 return r;
2156 }
2157 for (i = 0; i <= lasti; i++) {
2158 if (!(mask & (1 << i)))
2159 continue;
2160
2161 /* then read from LDS_OQ_A_POP */
2162 memset(&alu, 0, sizeof(alu));
2163
2164 alu.op = ALU_OP1_MOV;
2165 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
2166 alu.src[0].chan = 0;
2167 alu.dst.sel = dst_reg;
2168 alu.dst.chan = i;
2169 alu.dst.write = 1;
2170 alu.last = 1;
2171 r = r600_bytecode_add_alu(ctx->bc, &alu);
2172 if (r)
2173 return r;
2174 }
2175 return 0;
2176 }
2177
2178 static int fetch_mask(struct tgsi_src_register *reg)
2179 {
2180 int mask = 0;
2181 mask |= 1 << reg->SwizzleX;
2182 mask |= 1 << reg->SwizzleY;
2183 mask |= 1 << reg->SwizzleZ;
2184 mask |= 1 << reg->SwizzleW;
2185 return mask;
2186 }
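/* e.g. a source swizzled .xxzz sets only bits 0 and 2, giving mask 0x5,
 * so do_lds_fetch_values() skips the unused y and w channels. */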
2187
2188 static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2189 {
2190 int r;
2191 unsigned temp_reg = r600_get_temp(ctx);
2192
2193 r = get_lds_offset0(ctx, 2, temp_reg,
2194 !src->Register.Dimension);
2195 if (r)
2196 return r;
2197
2198 /* the base address is now in temp.x */
2199 r = r600_get_byte_address(ctx, temp_reg,
2200 NULL, src, ctx->tess_output_info, 1);
2201 if (r)
2202 return r;
2203
2204 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2205 if (r)
2206 return r;
2207 return 0;
2208 }
2209
2210 static int fetch_tcs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2211 {
2212 int r;
2213 unsigned temp_reg = r600_get_temp(ctx);
2214
2215 /* t.x = ips * r0.y */
2216 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
2217 temp_reg, 0,
2218 ctx->tess_input_info, 0,
2219 0, 1);
2220
2221 if (r)
2222 return r;
2223
2224 /* the base address is now in temp.x */
2225 r = r600_get_byte_address(ctx, temp_reg,
2226 NULL, src, ctx->tess_input_info, 1);
2227 if (r)
2228 return r;
2229
2230 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2231 if (r)
2232 return r;
2233 return 0;
2234 }
2235
2236 static int fetch_tcs_output(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2237 {
2238 int r;
2239 unsigned temp_reg = r600_get_temp(ctx);
2240
2241 r = get_lds_offset0(ctx, 1, temp_reg,
2242 !src->Register.Dimension);
2243 if (r)
2244 return r;
2245 /* the base address is now in temp.x */
2246 r = r600_get_byte_address(ctx, temp_reg,
2247 NULL, src,
2248 ctx->tess_output_info, 1);
2249 if (r)
2250 return r;
2251
2252 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2253 if (r)
2254 return r;
2255 return 0;
2256 }
2257
2258 static int tgsi_split_lds_inputs(struct r600_shader_ctx *ctx)
2259 {
2260 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2261 unsigned i;
2262
2263 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
2264 struct tgsi_full_src_register *src = &inst->Src[i];
2265
2266 if (ctx->type == PIPE_SHADER_TESS_EVAL && src->Register.File == TGSI_FILE_INPUT) {
2267 int treg = r600_get_temp(ctx);
2268 fetch_tes_input(ctx, src, treg);
2269 ctx->src[i].sel = treg;
2270 ctx->src[i].rel = 0;
2271 }
2272 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_INPUT) {
2273 int treg = r600_get_temp(ctx);
2274 fetch_tcs_input(ctx, src, treg);
2275 ctx->src[i].sel = treg;
2276 ctx->src[i].rel = 0;
2277 }
2278 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_OUTPUT) {
2279 int treg = r600_get_temp(ctx);
2280 fetch_tcs_output(ctx, src, treg);
2281 ctx->src[i].sel = treg;
2282 ctx->src[i].rel = 0;
2283 }
2284 }
2285 return 0;
2286 }
2287
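/* The hardware limits how many constant-file operands an ALU group can
 * read, and relatively addressed constants need a fetch, so all but one
 * constant source of an instruction are copied into temporaries first
 * and the instruction reads those instead. */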
2288 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
2289 {
2290 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2291 struct r600_bytecode_alu alu;
2292 int i, j, k, nconst, r;
2293
2294 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
2295 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
2296 nconst++;
2297 }
2298 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
2299 }
2300 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
2301 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
2302 continue;
2303 }
2304
2305 if (ctx->src[i].rel) {
2306 int chan = inst->Src[i].Indirect.Swizzle;
2307 int treg = r600_get_temp(ctx);
2308 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
2309 return r;
2310
2311 ctx->src[i].kc_bank = 0;
2312 ctx->src[i].kc_rel = 0;
2313 ctx->src[i].sel = treg;
2314 ctx->src[i].rel = 0;
2315 j--;
2316 } else if (j > 0) {
2317 int treg = r600_get_temp(ctx);
2318 for (k = 0; k < 4; k++) {
2319 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2320 alu.op = ALU_OP1_MOV;
2321 alu.src[0].sel = ctx->src[i].sel;
2322 alu.src[0].chan = k;
2323 alu.src[0].rel = ctx->src[i].rel;
2324 alu.src[0].kc_bank = ctx->src[i].kc_bank;
2325 alu.src[0].kc_rel = ctx->src[i].kc_rel;
2326 alu.dst.sel = treg;
2327 alu.dst.chan = k;
2328 alu.dst.write = 1;
2329 if (k == 3)
2330 alu.last = 1;
2331 r = r600_bytecode_add_alu(ctx->bc, &alu);
2332 if (r)
2333 return r;
2334 }
2335 ctx->src[i].sel = treg;
2336 ctx->src[i].rel = 0;
2337 j--;
2338 }
2339 }
2340 return 0;
2341 }
2342
2343 /* Immediates may need moving into a temp, e.g. for the trig functions, which use literals for their PI constants */
2344 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
2345 {
2346 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2347 struct r600_bytecode_alu alu;
2348 int i, j, k, nliteral, r;
2349
2350 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
2351 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2352 nliteral++;
2353 }
2354 }
2355 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
2356 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2357 int treg = r600_get_temp(ctx);
2358 for (k = 0; k < 4; k++) {
2359 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2360 alu.op = ALU_OP1_MOV;
2361 alu.src[0].sel = ctx->src[i].sel;
2362 alu.src[0].chan = k;
2363 alu.src[0].value = ctx->src[i].value[k];
2364 alu.dst.sel = treg;
2365 alu.dst.chan = k;
2366 alu.dst.write = 1;
2367 if (k == 3)
2368 alu.last = 1;
2369 r = r600_bytecode_add_alu(ctx->bc, &alu);
2370 if (r)
2371 return r;
2372 }
2373 ctx->src[i].sel = treg;
2374 j--;
2375 }
2376 }
2377 return 0;
2378 }
2379
2380 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
2381 {
2382 int i, r, count = ctx->shader->ninput;
2383
2384 for (i = 0; i < count; i++) {
2385 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2386 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
2387 if (r)
2388 return r;
2389 }
2390 }
2391 return 0;
2392 }
2393
2394 static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
2395 int stream, unsigned *stream_item_size UNUSED)
2396 {
2397 unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
2398 unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
2399 int j, r;
2400 unsigned i;
2401
2402 /* Sanity checking. */
2403 if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
2404 R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
2405 r = -EINVAL;
2406 goto out_err;
2407 }
2408 for (i = 0; i < so->num_outputs; i++) {
2409 if (so->output[i].output_buffer >= 4) {
2410 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
2411 so->output[i].output_buffer);
2412 r = -EINVAL;
2413 goto out_err;
2414 }
2415 }
2416
2417 /* Initialize locations where the outputs are stored. */
2418 for (i = 0; i < so->num_outputs; i++) {
2419
2420 so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
2421 start_comp[i] = so->output[i].start_component;
2422 /* Lower outputs with dst_offset < start_component.
2423 *
2424 * We can only output 4D vectors with a write mask, e.g. we can
2425 * only output the W component at offset 3, etc. If we want
2426 * to store Y, Z, or W at buffer offset 0, we need to use MOV
2427 * to move it to X and output X. */
2428 if (so->output[i].dst_offset < so->output[i].start_component) {
2429 unsigned tmp = r600_get_temp(ctx);
2430
2431 for (j = 0; j < so->output[i].num_components; j++) {
2432 struct r600_bytecode_alu alu;
2433 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2434 alu.op = ALU_OP1_MOV;
2435 alu.src[0].sel = so_gpr[i];
2436 alu.src[0].chan = so->output[i].start_component + j;
2437
2438 alu.dst.sel = tmp;
2439 alu.dst.chan = j;
2440 alu.dst.write = 1;
2441 if (j == so->output[i].num_components - 1)
2442 alu.last = 1;
2443 r = r600_bytecode_add_alu(ctx->bc, &alu);
2444 if (r)
2445 return r;
2446 }
2447 start_comp[i] = 0;
2448 so_gpr[i] = tmp;
2449 }
2450 }
2451
2452 /* Write outputs to buffers. */
2453 for (i = 0; i < so->num_outputs; i++) {
2454 struct r600_bytecode_output output;
2455
2456 if (stream != -1 && stream != so->output[i].stream)
2457 continue;
2458
2459 memset(&output, 0, sizeof(struct r600_bytecode_output));
2460 output.gpr = so_gpr[i];
2461 output.elem_size = so->output[i].num_components - 1;
2462 if (output.elem_size == 2)
2463 output.elem_size = 3; // 3-dword exports are unsupported, write 4 with junk at the end
2464 output.array_base = so->output[i].dst_offset - start_comp[i];
2465 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2466 output.burst_count = 1;
2467 /* array_size is an upper limit for the burst_count
2468 * with MEM_STREAM instructions */
2469 output.array_size = 0xFFF;
2470 output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];
2471
2472 if (ctx->bc->chip_class >= EVERGREEN) {
2473 switch (so->output[i].output_buffer) {
2474 case 0:
2475 output.op = CF_OP_MEM_STREAM0_BUF0;
2476 break;
2477 case 1:
2478 output.op = CF_OP_MEM_STREAM0_BUF1;
2479 break;
2480 case 2:
2481 output.op = CF_OP_MEM_STREAM0_BUF2;
2482 break;
2483 case 3:
2484 output.op = CF_OP_MEM_STREAM0_BUF3;
2485 break;
2486 }
2487 output.op += so->output[i].stream * 4;
2488 assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
2489 ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
2490 } else {
2491 switch (so->output[i].output_buffer) {
2492 case 0:
2493 output.op = CF_OP_MEM_STREAM0;
2494 break;
2495 case 1:
2496 output.op = CF_OP_MEM_STREAM1;
2497 break;
2498 case 2:
2499 output.op = CF_OP_MEM_STREAM2;
2500 break;
2501 case 3:
2502 output.op = CF_OP_MEM_STREAM3;
2503 break;
2504 }
2505 ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
2506 }
2507 r = r600_bytecode_add_output(ctx->bc, &output);
2508 if (r)
2509 goto out_err;
2510 }
2511 return 0;
2512 out_err:
2513 return r;
2514 }
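/* Worked example of the export encoding: an output with num_components = 3
 * starting at component Y (start_component = 1, dst_offset >= 1) gets
 * comp_mask = 0x7 << 1 = 0xE, and since 3-dword exports are unsupported,
 * elem_size is bumped from 2 to 3 (4 dwords, the last one junk). */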
2515
2516 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
2517 {
2518 struct r600_bytecode_alu alu;
2519 unsigned reg;
2520
2521 if (!ctx->shader->vs_out_edgeflag)
2522 return;
2523
2524 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
2525
2526 /* clamp(x, 0, 1) */
2527 memset(&alu, 0, sizeof(alu));
2528 alu.op = ALU_OP1_MOV;
2529 alu.src[0].sel = reg;
2530 alu.dst.sel = reg;
2531 alu.dst.write = 1;
2532 alu.dst.clamp = 1;
2533 alu.last = 1;
2534 r600_bytecode_add_alu(ctx->bc, &alu);
2535
2536 memset(&alu, 0, sizeof(alu));
2537 alu.op = ALU_OP1_FLT_TO_INT;
2538 alu.src[0].sel = reg;
2539 alu.dst.sel = reg;
2540 alu.dst.write = 1;
2541 alu.last = 1;
2542 r600_bytecode_add_alu(ctx->bc, &alu);
2543 }
2544
2545 int generate_gs_copy_shader(struct r600_context *rctx,
2546 struct r600_pipe_shader *gs,
2547 struct pipe_stream_output_info *so)
2548 {
2549 struct r600_shader_ctx ctx = {};
2550 struct r600_shader *gs_shader = &gs->shader;
2551 struct r600_pipe_shader *cshader;
2552 unsigned ocnt = gs_shader->noutput;
2553 struct r600_bytecode_alu alu;
2554 struct r600_bytecode_vtx vtx;
2555 struct r600_bytecode_output output;
2556 struct r600_bytecode_cf *cf_jump, *cf_pop,
2557 *last_exp_pos = NULL, *last_exp_param = NULL;
2558 int next_clip_pos = 61, next_param = 0;
2559 unsigned i, j;
2560 int ring;
2561 bool only_ring_0 = true;
2562 cshader = calloc(1, sizeof(struct r600_pipe_shader));
2563 if (!cshader)
2564 return 0;
2565
2566 memcpy(cshader->shader.output, gs_shader->output, ocnt *
2567 sizeof(struct r600_shader_io));
2568
2569 cshader->shader.noutput = ocnt;
2570
2571 ctx.shader = &cshader->shader;
2572 ctx.bc = &ctx.shader->bc;
2573 ctx.type = ctx.bc->type = PIPE_SHADER_VERTEX;
2574
2575 r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
2576 rctx->screen->has_compressed_msaa_texturing);
2577
2578 ctx.bc->isa = rctx->isa;
2579
2580 cf_jump = NULL;
2581 memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));
2582
2583 /* R0.x = R0.x & 0x3fffffff */
2584 memset(&alu, 0, sizeof(alu));
2585 alu.op = ALU_OP2_AND_INT;
2586 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2587 alu.src[1].value = 0x3fffffff;
2588 alu.dst.write = 1;
2589 r600_bytecode_add_alu(ctx.bc, &alu);
2590
2591 /* R0.y = R0.x >> 30 */
2592 memset(&alu, 0, sizeof(alu));
2593 alu.op = ALU_OP2_LSHR_INT;
2594 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2595 alu.src[1].value = 0x1e;
2596 alu.dst.chan = 1;
2597 alu.dst.write = 1;
2598 alu.last = 1;
2599 r600_bytecode_add_alu(ctx.bc, &alu);
2600
2601 /* fetch vertex data from GSVS ring */
2602 for (i = 0; i < ocnt; ++i) {
2603 struct r600_shader_io *out = &ctx.shader->output[i];
2604
2605 out->gpr = i + 1;
2606 out->ring_offset = i * 16;
2607
2608 memset(&vtx, 0, sizeof(vtx));
2609 vtx.op = FETCH_OP_VFETCH;
2610 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
2611 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2612 vtx.mega_fetch_count = 16;
2613 vtx.offset = out->ring_offset;
2614 vtx.dst_gpr = out->gpr;
2615 vtx.src_gpr = 0;
2616 vtx.dst_sel_x = 0;
2617 vtx.dst_sel_y = 1;
2618 vtx.dst_sel_z = 2;
2619 vtx.dst_sel_w = 3;
2620 if (rctx->b.chip_class >= EVERGREEN) {
2621 vtx.use_const_fields = 1;
2622 } else {
2623 vtx.data_format = FMT_32_32_32_32_FLOAT;
2624 }
2625
2626 r600_bytecode_add_vtx(ctx.bc, &vtx);
2627 }
2628 ctx.temp_reg = i + 1;
2629 for (ring = 3; ring >= 0; --ring) {
2630 bool enabled = false;
2631 for (i = 0; i < so->num_outputs; i++) {
2632 if (so->output[i].stream == ring) {
2633 enabled = true;
2634 if (ring > 0)
2635 only_ring_0 = false;
2636 break;
2637 }
2638 }
2639 if (ring != 0 && !enabled) {
2640 cshader->shader.ring_item_sizes[ring] = 0;
2641 continue;
2642 }
2643
2644 if (cf_jump) {
2645 // Patch up jump label
2646 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2647 cf_pop = ctx.bc->cf_last;
2648
2649 cf_jump->cf_addr = cf_pop->id + 2;
2650 cf_jump->pop_count = 1;
2651 cf_pop->cf_addr = cf_pop->id + 2;
2652 cf_pop->pop_count = 1;
2653 }
2654
2655 /* PRED_SETE_INT __, R0.y, ring */
2656 memset(&alu, 0, sizeof(alu));
2657 alu.op = ALU_OP2_PRED_SETE_INT;
2658 alu.src[0].chan = 1;
2659 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2660 alu.src[1].value = ring;
2661 alu.execute_mask = 1;
2662 alu.update_pred = 1;
2663 alu.last = 1;
2664 r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2665
2666 r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
2667 cf_jump = ctx.bc->cf_last;
2668
2669 if (enabled)
2670 emit_streamout(&ctx, so, only_ring_0 ? -1 : ring, &cshader->shader.ring_item_sizes[ring]);
2671 cshader->shader.ring_item_sizes[ring] = ocnt * 16;
2672 }
2673
2674 /* the regular bytecode path adds NOPs on R600; do the same here */
2675 if (ctx.bc->chip_class == R600) {
2676 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2677 alu.op = ALU_OP0_NOP;
2678 alu.last = 1;
2679 r600_bytecode_add_alu(ctx.bc, &alu);
2680
2681 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2682 }
2683
2684 /* export vertex data */
2685 /* XXX factor out common code with r600_shader_from_tgsi ? */
2686 for (i = 0; i < ocnt; ++i) {
2687 struct r600_shader_io *out = &ctx.shader->output[i];
2688 bool instream0 = true;
2689 if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
2690 continue;
2691
2692 for (j = 0; j < so->num_outputs; j++) {
2693 if (so->output[j].register_index == i) {
2694 if (so->output[j].stream == 0)
2695 break;
2696 if (so->output[j].stream > 0)
2697 instream0 = false;
2698 }
2699 }
2700 if (!instream0)
2701 continue;
2702 memset(&output, 0, sizeof(output));
2703 output.gpr = out->gpr;
2704 output.elem_size = 3;
2705 output.swizzle_x = 0;
2706 output.swizzle_y = 1;
2707 output.swizzle_z = 2;
2708 output.swizzle_w = 3;
2709 output.burst_count = 1;
2710 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2711 output.op = CF_OP_EXPORT;
2712 switch (out->name) {
2713 case TGSI_SEMANTIC_POSITION:
2714 output.array_base = 60;
2715 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2716 break;
2717
2718 case TGSI_SEMANTIC_PSIZE:
2719 output.array_base = 61;
2720 if (next_clip_pos == 61)
2721 next_clip_pos = 62;
2722 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2723 output.swizzle_y = 7;
2724 output.swizzle_z = 7;
2725 output.swizzle_w = 7;
2726 ctx.shader->vs_out_misc_write = 1;
2727 ctx.shader->vs_out_point_size = 1;
2728 break;
2729 case TGSI_SEMANTIC_LAYER:
2730 if (out->spi_sid) {
2731 /* duplicate it as PARAM to pass to the pixel shader */
2732 output.array_base = next_param++;
2733 r600_bytecode_add_output(ctx.bc, &output);
2734 last_exp_param = ctx.bc->cf_last;
2735 }
2736 output.array_base = 61;
2737 if (next_clip_pos == 61)
2738 next_clip_pos = 62;
2739 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2740 output.swizzle_x = 7;
2741 output.swizzle_y = 7;
2742 output.swizzle_z = 0;
2743 output.swizzle_w = 7;
2744 ctx.shader->vs_out_misc_write = 1;
2745 ctx.shader->vs_out_layer = 1;
2746 break;
2747 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2748 if (out->spi_sid) {
2749 /* duplicate it as PARAM to pass to the pixel shader */
2750 output.array_base = next_param++;
2751 r600_bytecode_add_output(ctx.bc, &output);
2752 last_exp_param = ctx.bc->cf_last;
2753 }
2754 output.array_base = 61;
2755 if (next_clip_pos == 61)
2756 next_clip_pos = 62;
2757 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2758 ctx.shader->vs_out_misc_write = 1;
2759 ctx.shader->vs_out_viewport = 1;
2760 output.swizzle_x = 7;
2761 output.swizzle_y = 7;
2762 output.swizzle_z = 7;
2763 output.swizzle_w = 0;
2764 break;
2765 case TGSI_SEMANTIC_CLIPDIST:
2766 /* spi_sid is 0 for clipdistance outputs that were generated
2767 * for clipvertex - we don't need to pass them to PS */
2768 ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
2769 ctx.shader->cull_dist_write = gs->shader.cull_dist_write;
2770 ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask;
2771 if (out->spi_sid) {
2772 /* duplicate it as PARAM to pass to the pixel shader */
2773 output.array_base = next_param++;
2774 r600_bytecode_add_output(ctx.bc, &output);
2775 last_exp_param = ctx.bc->cf_last;
2776 }
2777 output.array_base = next_clip_pos++;
2778 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2779 break;
2780 case TGSI_SEMANTIC_FOG:
2781 output.swizzle_y = 4; /* 0 */
2782 output.swizzle_z = 4; /* 0 */
2783 output.swizzle_w = 5; /* 1 */
2784 break;
2785 default:
2786 output.array_base = next_param++;
2787 break;
2788 }
2789 r600_bytecode_add_output(ctx.bc, &output);
2790 if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
2791 last_exp_param = ctx.bc->cf_last;
2792 else
2793 last_exp_pos = ctx.bc->cf_last;
2794 }
2795
2796 if (!last_exp_pos) {
2797 memset(&output, 0, sizeof(output));
2798 output.gpr = 0;
2799 output.elem_size = 3;
2800 output.swizzle_x = 7;
2801 output.swizzle_y = 7;
2802 output.swizzle_z = 7;
2803 output.swizzle_w = 7;
2804 output.burst_count = 1;
2805 output.type = 2;
2806 output.op = CF_OP_EXPORT;
2807 output.array_base = 60;
2808 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2809 r600_bytecode_add_output(ctx.bc, &output);
2810 last_exp_pos = ctx.bc->cf_last;
2811 }
2812
2813 if (!last_exp_param) {
2814 memset(&output, 0, sizeof(output));
2815 output.gpr = 0;
2816 output.elem_size = 3;
2817 output.swizzle_x = 7;
2818 output.swizzle_y = 7;
2819 output.swizzle_z = 7;
2820 output.swizzle_w = 7;
2821 output.burst_count = 1;
2822 output.type = 2;
2823 output.op = CF_OP_EXPORT;
2824 output.array_base = next_param++;
2825 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2826 r600_bytecode_add_output(ctx.bc, &output);
2827 last_exp_param = ctx.bc->cf_last;
2828 }
2829
2830 last_exp_pos->op = CF_OP_EXPORT_DONE;
2831 last_exp_param->op = CF_OP_EXPORT_DONE;
2832
2833 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2834 cf_pop = ctx.bc->cf_last;
2835
2836 cf_jump->cf_addr = cf_pop->id + 2;
2837 cf_jump->pop_count = 1;
2838 cf_pop->cf_addr = cf_pop->id + 2;
2839 cf_pop->pop_count = 1;
2840
2841 if (ctx.bc->chip_class == CAYMAN)
2842 cm_bytecode_add_cf_end(ctx.bc);
2843 else {
2844 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2845 ctx.bc->cf_last->end_of_program = 1;
2846 }
2847
2848 gs->gs_copy_shader = cshader;
2849 cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2850
2851 ctx.bc->nstack = 1;
2852
2853 return r600_bytecode_build(ctx.bc);
2854 }
2855
2856 static int emit_inc_ring_offset(struct r600_shader_ctx *ctx, int idx, bool ind)
2857 {
2858 if (ind) {
2859 struct r600_bytecode_alu alu;
2860 int r;
2861
2862 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2863 alu.op = ALU_OP2_ADD_INT;
2864 alu.src[0].sel = ctx->gs_export_gpr_tregs[idx];
2865 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2866 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
2867 alu.dst.sel = ctx->gs_export_gpr_tregs[idx];
2868 alu.dst.write = 1;
2869 alu.last = 1;
2870 r = r600_bytecode_add_alu(ctx->bc, &alu);
2871 if (r)
2872 return r;
2873 }
2874 return 0;
2875 }
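/* The per-stream export index counts in 16-byte (vec4) units: a vertex
 * whose ring layout is, say, three vec4s (gs_out_ring_offset = 48 bytes)
 * advances the index by 48 >> 4 = 3 on every emitted vertex. */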
2876
2877 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so UNUSED, int stream, bool ind)
2878 {
2879 struct r600_bytecode_output output;
2880 int ring_offset;
2881 unsigned i, k;
2882 int effective_stream = stream == -1 ? 0 : stream;
2883 int idx = 0;
2884
2885 for (i = 0; i < ctx->shader->noutput; i++) {
2886 if (ctx->gs_for_vs) {
2887 /* for ES we need to look up the ring offset the GS expects
2888 * (map this output to a GS input by name and sid) */
2889 /* FIXME precompute offsets */
2890 ring_offset = -1;
2891 for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
2892 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
2893 struct r600_shader_io *out = &ctx->shader->output[i];
2894 if (in->name == out->name && in->sid == out->sid)
2895 ring_offset = in->ring_offset;
2896 }
2897
2898 if (ring_offset == -1)
2899 continue;
2900 } else {
2901 ring_offset = idx * 16;
2902 idx++;
2903 }
2904
2905 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
2906 continue;
2907 /* after parsing the input decls, next_ring_offset holds the total size
2908 * of a single vertex's data; gs_next_vertex is the current vertex index */
2909 if (!ind)
2910 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
2911
2912 memset(&output, 0, sizeof(struct r600_bytecode_output));
2913 output.gpr = ctx->shader->output[i].gpr;
2914 output.elem_size = 3;
2915 output.comp_mask = 0xF;
2916 output.burst_count = 1;
2917
2918 if (ind)
2919 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
2920 else
2921 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2922
2923 switch (stream) {
2924 default:
2925 case 0:
2926 output.op = CF_OP_MEM_RING; break;
2927 case 1:
2928 output.op = CF_OP_MEM_RING1; break;
2929 case 2:
2930 output.op = CF_OP_MEM_RING2; break;
2931 case 3:
2932 output.op = CF_OP_MEM_RING3; break;
2933 }
2934
2935 if (ind) {
2936 output.array_base = ring_offset >> 2; /* in dwords */
2937 output.array_size = 0xfff;
2938 output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
2939 } else
2940 output.array_base = ring_offset >> 2; /* in dwords */
2941 r600_bytecode_add_output(ctx->bc, &output);
2942 }
2943
2944 ++ctx->gs_next_vertex;
2945 return 0;
2946 }
2947
2948
2949 static int r600_fetch_tess_io_info(struct r600_shader_ctx *ctx)
2950 {
2951 int r;
2952 struct r600_bytecode_vtx vtx;
2953 int temp_val = ctx->temp_reg;
2954 /* zero a temp; it provides the fetch index for the info fetches below */
2955 r = single_alu_op2(ctx, ALU_OP1_MOV,
2956 temp_val, 0,
2957 V_SQ_ALU_SRC_LITERAL, 0,
2958 0, 0);
2959 if (r)
2960 return r;
2961
2962 /* used by VS/TCS */
2963 if (ctx->tess_input_info) {
2964 /* fetch tcs input values into resv space */
2965 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2966 vtx.op = FETCH_OP_VFETCH;
2967 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2968 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2969 vtx.mega_fetch_count = 16;
2970 vtx.data_format = FMT_32_32_32_32;
2971 vtx.num_format_all = 2;
2972 vtx.format_comp_all = 1;
2973 vtx.use_const_fields = 0;
2974 vtx.endian = r600_endian_swap(32);
2975 vtx.srf_mode_all = 1;
2976 vtx.offset = 0;
2977 vtx.dst_gpr = ctx->tess_input_info;
2978 vtx.dst_sel_x = 0;
2979 vtx.dst_sel_y = 1;
2980 vtx.dst_sel_z = 2;
2981 vtx.dst_sel_w = 3;
2982 vtx.src_gpr = temp_val;
2983 vtx.src_sel_x = 0;
2984
2985 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2986 if (r)
2987 return r;
2988 }
2989
2990 /* used by TCS/TES */
2991 if (ctx->tess_output_info) {
2992 /* fetch tcs output values into resv space */
2993 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2994 vtx.op = FETCH_OP_VFETCH;
2995 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2996 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2997 vtx.mega_fetch_count = 16;
2998 vtx.data_format = FMT_32_32_32_32;
2999 vtx.num_format_all = 2;
3000 vtx.format_comp_all = 1;
3001 vtx.use_const_fields = 0;
3002 vtx.endian = r600_endian_swap(32);
3003 vtx.srf_mode_all = 1;
3004 vtx.offset = 16;
3005 vtx.dst_gpr = ctx->tess_output_info;
3006 vtx.dst_sel_x = 0;
3007 vtx.dst_sel_y = 1;
3008 vtx.dst_sel_z = 2;
3009 vtx.dst_sel_w = 3;
3010 vtx.src_gpr = temp_val;
3011 vtx.src_sel_x = 0;
3012
3013 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
3014 if (r)
3015 return r;
3016 }
3017 return 0;
3018 }
3019
3020 static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
3021 {
3022 int j, r;
3023 int temp_reg;
3024 unsigned i;
3025
3026 /* fetch the tcs input info into tess_input_info */
3027 ctx->tess_input_info = r600_get_temp(ctx);
3028 ctx->tess_output_info = 0;
3029 r = r600_fetch_tess_io_info(ctx);
3030 if (r)
3031 return r;
3032
3033 temp_reg = r600_get_temp(ctx);
3034 /* temp_reg.x will hold the per-vertex LDS base: vertex_dw_stride * idx */
3035 /* MUL vertexID, vertex_dw_stride */
3036 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
3037 temp_reg, 0,
3038 ctx->tess_input_info, 1,
3039 0, 1); /* rel id in r0.y? */
3040 if (r)
3041 return r;
3042
3043 for (i = 0; i < ctx->shader->noutput; i++) {
3044 struct r600_bytecode_alu alu;
3045 int param = r600_get_lds_unique_index(ctx->shader->output[i].name,
3046 ctx->shader->output[i].sid);
3047
3048 if (param) {
3049 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3050 temp_reg, 1,
3051 temp_reg, 0,
3052 V_SQ_ALU_SRC_LITERAL, param * 16);
3053 if (r)
3054 return r;
3055 }
3056
3057 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3058 temp_reg, 2,
3059 temp_reg, param ? 1 : 0,
3060 V_SQ_ALU_SRC_LITERAL, 8);
3061 if (r)
3062 return r;
3063
3064
3065 for (j = 0; j < 2; j++) {
3066 int chan = (j == 1) ? 2 : (param ? 1 : 0);
3067 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3068 alu.op = LDS_OP3_LDS_WRITE_REL;
3069 alu.src[0].sel = temp_reg;
3070 alu.src[0].chan = chan;
3071 alu.src[1].sel = ctx->shader->output[i].gpr;
3072 alu.src[1].chan = j * 2;
3073 alu.src[2].sel = ctx->shader->output[i].gpr;
3074 alu.src[2].chan = (j * 2) + 1;
3075 alu.last = 1;
3076 alu.dst.chan = 0;
3077 alu.lds_idx = 1;
3078 alu.is_lds_idx_op = true;
3079 r = r600_bytecode_add_alu(ctx->bc, &alu);
3080 if (r)
3081 return r;
3082 }
3083 }
3084 return 0;
3085 }
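/* Note on the write loop above: LDS_WRITE_REL with lds_idx set stores two
 * consecutive dwords per issue, so a full vec4 output takes exactly two
 * ops, xy at the attribute's address and zw at that address plus 8 bytes. */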
3086
3087 static int r600_store_tcs_output(struct r600_shader_ctx *ctx)
3088 {
3089 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3090 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
3091 int i, r, lasti;
3092 int temp_reg = r600_get_temp(ctx);
3093 struct r600_bytecode_alu alu;
3094 unsigned write_mask = dst->Register.WriteMask;
3095
3096 if (inst->Dst[0].Register.File != TGSI_FILE_OUTPUT)
3097 return 0;
3098
3099 r = get_lds_offset0(ctx, 1, temp_reg, !dst->Register.Dimension);
3100 if (r)
3101 return r;
3102
3103 /* the base address is now in temp.x */
3104 r = r600_get_byte_address(ctx, temp_reg,
3105 &inst->Dst[0], NULL, ctx->tess_output_info, 1);
3106 if (r)
3107 return r;
3108
3109 /* LDS write */
3110 lasti = tgsi_last_instruction(write_mask);
3111 for (i = 1; i <= lasti; i++) {
3112
3113 if (!(write_mask & (1 << i)))
3114 continue;
3115 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3116 temp_reg, i,
3117 temp_reg, 0,
3118 V_SQ_ALU_SRC_LITERAL, 4 * i);
3119 if (r)
3120 return r;
3121 }
3122
3123 for (i = 0; i <= lasti; i++) {
3124 if (!(write_mask & (1 << i)))
3125 continue;
3126
3127 if ((i == 0 && ((write_mask & 3) == 3)) ||
3128 (i == 2 && ((write_mask & 0xc) == 0xc))) {
3129 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3130 alu.op = LDS_OP3_LDS_WRITE_REL;
3131 alu.src[0].sel = temp_reg;
3132 alu.src[0].chan = i;
3133
3134 alu.src[1].sel = dst->Register.Index;
3135 alu.src[1].sel += ctx->file_offset[dst->Register.File];
3136 alu.src[1].chan = i;
3137
3138 alu.src[2].sel = dst->Register.Index;
3139 alu.src[2].sel += ctx->file_offset[dst->Register.File];
3140 alu.src[2].chan = i + 1;
3141 alu.lds_idx = 1;
3142 alu.dst.chan = 0;
3143 alu.last = 1;
3144 alu.is_lds_idx_op = true;
3145 r = r600_bytecode_add_alu(ctx->bc, &alu);
3146 if (r)
3147 return r;
3148 i += 1;
3149 continue;
3150 }
3151 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3152 alu.op = LDS_OP2_LDS_WRITE;
3153 alu.src[0].sel = temp_reg;
3154 alu.src[0].chan = i;
3155
3156 alu.src[1].sel = dst->Register.Index;
3157 alu.src[1].sel += ctx->file_offset[dst->Register.File];
3158 alu.src[1].chan = i;
3159
3160 alu.src[2].sel = V_SQ_ALU_SRC_0;
3161 alu.dst.chan = 0;
3162 alu.last = 1;
3163 alu.is_lds_idx_op = true;
3164 r = r600_bytecode_add_alu(ctx->bc, &alu);
3165 if (r)
3166 return r;
3167 }
3168 return 0;
3169 }
3170
3171 static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
3172 int output_idx, int nc)
3173 {
3174 int param;
3175 unsigned temp_reg = r600_get_temp(ctx);
3176 unsigned name = ctx->shader->output[output_idx].name;
3177 int dreg = ctx->shader->output[output_idx].gpr;
3178 int r;
3179
3180 param = r600_get_lds_unique_index(name, 0);
3181 r = get_lds_offset0(ctx, 1, temp_reg, true);
3182 if (r)
3183 return r;
3184
3185 if (param) {
3186 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3187 temp_reg, 0,
3188 temp_reg, 0,
3189 V_SQ_ALU_SRC_LITERAL, param * 16);
3190 if (r)
3191 return r;
3192 }
3193
3194 do_lds_fetch_values(ctx, temp_reg, dreg, ((1u << nc) - 1));
3195 return 0;
3196 }
3197
3198 static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
3199 {
3200 int stride, outer_comps, inner_comps;
3201 int tessinner_idx = -1, tessouter_idx = -1;
3202 int i, r;
3203 unsigned j;
3204 int temp_reg = r600_get_temp(ctx);
3205 int treg[3] = {-1, -1, -1};
3206 struct r600_bytecode_alu alu;
3207 struct r600_bytecode_cf *cf_jump, *cf_pop;
3208
3209 /* only execute factor emission for invocation 0 */
3210 /* PRED_SETE_INT __, R0.z, 0 */
3211 memset(&alu, 0, sizeof(alu));
3212 alu.op = ALU_OP2_PRED_SETE_INT;
3213 alu.src[0].chan = 2;
3214 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3215 alu.execute_mask = 1;
3216 alu.update_pred = 1;
3217 alu.last = 1;
3218 r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
3219
3220 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
3221 cf_jump = ctx->bc->cf_last;
3222
3223 treg[0] = r600_get_temp(ctx);
3224 switch (ctx->shader->tcs_prim_mode) {
3225 case PIPE_PRIM_LINES:
3226 stride = 8; /* 2 dwords, 1 vec2 store */
3227 outer_comps = 2;
3228 inner_comps = 0;
3229 break;
3230 case PIPE_PRIM_TRIANGLES:
3231 stride = 16; /* 4 dwords, 1 vec4 store */
3232 outer_comps = 3;
3233 inner_comps = 1;
3234 treg[1] = r600_get_temp(ctx);
3235 break;
3236 case PIPE_PRIM_QUADS:
3237 stride = 24; /* 6 dwords, 2 stores (vec4 + vec2) */
3238 outer_comps = 4;
3239 inner_comps = 2;
3240 treg[1] = r600_get_temp(ctx);
3241 treg[2] = r600_get_temp(ctx);
3242 break;
3243 default:
3244 assert(0);
3245 return -1;
3246 }
3247
3248 /* R0 is PatchID, RelPatchID, InvocationID, tf_base */
3249 /* TF_WRITE takes index in R.x, value in R.y */
3250 for (j = 0; j < ctx->shader->noutput; j++) {
3251 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSINNER)
3252 tessinner_idx = j;
3253 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSOUTER)
3254 tessouter_idx = j;
3255 }
3256
3257 if (tessouter_idx == -1)
3258 return -1;
3259
3260 if (tessinner_idx == -1 && inner_comps)
3261 return -1;
3262
3263 if (tessouter_idx != -1) {
3264 r = r600_tess_factor_read(ctx, tessouter_idx, outer_comps);
3265 if (r)
3266 return r;
3267 }
3268
3269 if (tessinner_idx != -1) {
3270 r = r600_tess_factor_read(ctx, tessinner_idx, inner_comps);
3271 if (r)
3272 return r;
3273 }
3274
3275 /* compute the tess factor byte address:
3276  *   temp.x = tf_base(r0.w) + relpatchid(r0.y) * tf_stride
3277  * i.e. multiply the incoming r0.y by the stride and add the
3278  * incoming r0.w, done in a single MULADD below */
3279
3280 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
3281 temp_reg, 0,
3282 0, 1,
3283 V_SQ_ALU_SRC_LITERAL, stride,
3284 0, 3);
3285 if (r)
3286 return r;
3287
3288 for (i = 0; i < outer_comps + inner_comps; i++) {
3289 int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
3290 int out_comp = i >= outer_comps ? i - outer_comps : i;
3291
3292 if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
3293 if (out_comp == 1)
3294 out_comp = 0;
3295 else if (out_comp == 0)
3296 out_comp = 1;
3297 }
3298
3299 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3300 treg[i / 2], (2 * (i % 2)),
3301 temp_reg, 0,
3302 V_SQ_ALU_SRC_LITERAL, 4 * i);
3303 if (r)
3304 return r;
3305 r = single_alu_op2(ctx, ALU_OP1_MOV,
3306 treg[i / 2], 1 + (2 * (i%2)),
3307 ctx->shader->output[out_idx].gpr, out_comp,
3308 0, 0);
3309 if (r)
3310 return r;
3311 }
3312 for (i = 0; i < outer_comps + inner_comps; i++) {
3313 struct r600_bytecode_gds gds;
3314
3315 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
3316 gds.src_gpr = treg[i / 2];
3317 gds.src_sel_x = 2 * (i % 2);
3318 gds.src_sel_y = 1 + (2 * (i % 2));
3319 gds.src_sel_z = 4;
3320 gds.dst_sel_x = 7;
3321 gds.dst_sel_y = 7;
3322 gds.dst_sel_z = 7;
3323 gds.dst_sel_w = 7;
3324 gds.op = FETCH_OP_TF_WRITE;
3325 r = r600_bytecode_add_gds(ctx->bc, &gds);
3326 if (r)
3327 return r;
3328 }
3329
3330 // Patch up jump label
3331 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
3332 cf_pop = ctx->bc->cf_last;
3333
3334 cf_jump->cf_addr = cf_pop->id + 2;
3335 cf_jump->pop_count = 1;
3336 cf_pop->cf_addr = cf_pop->id + 2;
3337 cf_pop->pop_count = 1;
3338
3339 return 0;
3340 }
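/* Layout recap from the strides above: LINES emits 2 outer factors
 * (8 bytes), TRIANGLES 3 outer + 1 inner (16 bytes), QUADS 4 outer +
 * 2 inner (24 bytes); each factor is written to the tess factor ring as
 * an (index, value) pair through a GDS TF_WRITE. */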
3341
3342 /*
3343 * We have to work out the thread ID for load and atomic
3344 * operations, which store the returned value at an index
3345 * in an intermediate buffer.
3346 * The index is derived from the thread id, which is
3347 * computed with the MBCNT instructions.
3348 * The shader engine ID is multiplied by 256,
3349 * and the wave id is added.
3350 * The result is then multiplied by 64 and the per-wave
3351 * thread id is added.
3352 */
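/* As a formula (sketch):
 *   tid = (SE_ID * 256 + HW_WAVE_ID) * 64 + mbcnt(~0)
 * e.g. SE 1, wave 3, lane 5 gives (1 * 256 + 3) * 64 + 5 = 16581. */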
3353 static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
3354 {
3355 struct r600_bytecode_alu alu;
3356 int r;
3357
3358 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3359 alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT;
3360 alu.dst.sel = ctx->temp_reg;
3361 alu.dst.chan = 0;
3362 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3363 alu.src[0].value = 0xffffffff;
3364 alu.dst.write = 1;
3365 r = r600_bytecode_add_alu(ctx->bc, &alu);
3366 if (r)
3367 return r;
3368
3369 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3370 alu.op = ALU_OP1_MBCNT_32HI_INT;
3371 alu.dst.sel = ctx->temp_reg;
3372 alu.dst.chan = 1;
3373 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3374 alu.src[0].value = 0xffffffff;
3375 alu.dst.write = 1;
3376 r = r600_bytecode_add_alu(ctx->bc, &alu);
3377 if (r)
3378 return r;
3379
3380 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3381 alu.op = ALU_OP3_MULADD_UINT24;
3382 alu.dst.sel = ctx->temp_reg;
3383 alu.dst.chan = 2;
3384 alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID;
3385 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3386 alu.src[1].value = 256;
3387 alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID;
3388 alu.dst.write = 1;
3389 alu.is_op3 = 1;
3390 alu.last = 1;
3391 r = r600_bytecode_add_alu(ctx->bc, &alu);
3392 if (r)
3393 return r;
3394
3395 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
3396 ctx->thread_id_gpr, 1,
3397 ctx->temp_reg, 2,
3398 V_SQ_ALU_SRC_LITERAL, 0x40,
3399 ctx->temp_reg, 0);
3400 if (r)
3401 return r;
3402 return 0;
3403 }
3404
3405 static int r600_shader_from_tgsi(struct r600_context *rctx,
3406 struct r600_pipe_shader *pipeshader,
3407 union r600_shader_key key)
3408 {
3409 struct r600_screen *rscreen = rctx->screen;
3410 struct r600_shader *shader = &pipeshader->shader;
3411 struct tgsi_token *tokens = pipeshader->selector->tokens;
3412 struct pipe_stream_output_info so = pipeshader->selector->so;
3413 struct tgsi_full_immediate *immediate;
3414 struct r600_shader_ctx ctx;
3415 struct r600_bytecode_output output[ARRAY_SIZE(shader->output)];
3416 unsigned output_done, noutput;
3417 unsigned opcode;
3418 int j, k, r = 0;
3419 unsigned i;
3420 int next_param_base = 0, next_clip_base;
3421 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
3422 bool indirect_gprs;
3423 bool ring_outputs = false;
3424 bool lds_outputs = false;
3425 bool lds_inputs = false;
3426 bool pos_emitted = false;
3427
3428 ctx.bc = &shader->bc;
3429 ctx.shader = shader;
3430
3431 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
3432 rscreen->has_compressed_msaa_texturing);
3433 ctx.tokens = tokens;
3434 tgsi_scan_shader(tokens, &ctx.info);
3435 shader->indirect_files = ctx.info.indirect_files;
3436
3437 int narrays = ctx.info.array_max[TGSI_FILE_TEMPORARY];
3438 ctx.array_infos = calloc(narrays, sizeof(*ctx.array_infos));
3439 ctx.spilled_arrays = calloc(narrays, sizeof(bool));
3440 tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, narrays, ctx.array_infos);
3441
3442 shader->uses_helper_invocation = false;
3443 shader->uses_doubles = ctx.info.uses_doubles;
3444 shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC];
3445 shader->nsys_inputs = 0;
3446
3447 shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0 ||
3448 ctx.info.file_count[TGSI_FILE_BUFFER] > 0;
3449 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
3450 tgsi_parse_init(&ctx.parse, tokens);
3451 ctx.type = ctx.info.processor;
3452 shader->processor_type = ctx.type;
3453 ctx.bc->type = shader->processor_type;
3454
3455 switch (ctx.type) {
3456 case PIPE_SHADER_VERTEX:
3457 shader->vs_as_gs_a = key.vs.as_gs_a;
3458 shader->vs_as_es = key.vs.as_es;
3459 shader->vs_as_ls = key.vs.as_ls;
3460 shader->atomic_base = key.vs.first_atomic_counter;
3461 if (shader->vs_as_es)
3462 ring_outputs = true;
3463 if (shader->vs_as_ls)
3464 lds_outputs = true;
3465 break;
3466 case PIPE_SHADER_GEOMETRY:
3467 ring_outputs = true;
3468 shader->atomic_base = key.gs.first_atomic_counter;
3469 shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix;
3470 break;
3471 case PIPE_SHADER_TESS_CTRL:
3472 shader->tcs_prim_mode = key.tcs.prim_mode;
3473 shader->atomic_base = key.tcs.first_atomic_counter;
3474 lds_outputs = true;
3475 lds_inputs = true;
3476 break;
3477 case PIPE_SHADER_TESS_EVAL:
3478 shader->tes_as_es = key.tes.as_es;
3479 shader->atomic_base = key.tes.first_atomic_counter;
3480 lds_inputs = true;
3481 if (shader->tes_as_es)
3482 ring_outputs = true;
3483 break;
3484 case PIPE_SHADER_FRAGMENT:
3485 shader->two_side = key.ps.color_two_side;
3486 shader->atomic_base = key.ps.first_atomic_counter;
3487 shader->rat_base = key.ps.nr_cbufs;
3488 shader->image_size_const_offset = key.ps.image_size_const_offset;
3489 break;
3490 case PIPE_SHADER_COMPUTE:
3491 shader->rat_base = 0;
3492 shader->image_size_const_offset = ctx.info.file_count[TGSI_FILE_SAMPLER];
3493 break;
3494 default:
3495 break;
3496 }
3497
3498 if (shader->vs_as_es || shader->tes_as_es) {
3499 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
3500 } else {
3501 ctx.gs_for_vs = NULL;
3502 }
3503
3504 ctx.next_ring_offset = 0;
3505 ctx.gs_out_ring_offset = 0;
3506 ctx.gs_next_vertex = 0;
3507 ctx.gs_stream_output_info = &so;
3508
3509 ctx.thread_id_gpr = -1;
3510 ctx.face_gpr = -1;
3511 ctx.fixed_pt_position_gpr = -1;
3512 ctx.fragcoord_input = -1;
3513 ctx.colors_used = 0;
3514 ctx.clip_vertex_write = 0;
3515
3516 ctx.helper_invoc_reg = -1;
3517 ctx.cs_block_size_reg = -1;
3518 ctx.cs_grid_size_reg = -1;
3519 ctx.cs_block_size_loaded = false;
3520 ctx.cs_grid_size_loaded = false;
3521
3522 shader->nr_ps_color_exports = 0;
3523 shader->nr_ps_max_color_exports = 0;
3524
3525
3526 /* register allocations */
3527 /* Values [0,127] correspond to GPR[0..127].
3528 * Values [128,159] correspond to constant buffer bank 0
3529 * Values [160,191] correspond to constant buffer bank 1
3530 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
3531 * Values [256,287] correspond to constant buffer bank 2 (EG)
3532 * Values [288,319] correspond to constant buffer bank 3 (EG)
3533 * Other special values are shown in the list below.
3534 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
3535 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
3536 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
3537 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
3538 * 248 SQ_ALU_SRC_0: special constant 0.0.
3539 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
3540 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
3541 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
3542 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
3543 * 253 SQ_ALU_SRC_LITERAL: literal constant.
3544 * 254 SQ_ALU_SRC_PV: previous vector result.
3545 * 255 SQ_ALU_SRC_PS: previous scalar result.
3546 */
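/* For example, constant 7 in kcache bank 0 is encoded as sel 128 + 7 = 135,
 * while GPR 7 is simply sel 7. */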
3547 for (i = 0; i < TGSI_FILE_COUNT; i++) {
3548 ctx.file_offset[i] = 0;
3549 }
3550
3551 if (ctx.type == PIPE_SHADER_VERTEX) {
3552
3553 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3554 if (ctx.info.num_inputs)
3555 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
3556 }
3557 if (ctx.type == PIPE_SHADER_FRAGMENT) {
3558 if (ctx.bc->chip_class >= EVERGREEN)
3559 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
3560 else
3561 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
3562
3563 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3564 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_HELPER_INVOCATION) {
3565 ctx.helper_invoc_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3566 shader->uses_helper_invocation = true;
3567 }
3568 }
3569 }
3570 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3571 /* FIXME 1 would be enough in some cases (3 or fewer input vertices) */
3572 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3573 }
3574 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3575 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3576 if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3577 bool add_tesscoord = false, add_tess_inout = false;
3578 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3579 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3580 /* if we have tesscoord, reserve one extra reg */
3581 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSCOORD)
3582 add_tesscoord = true;
3583 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSINNER ||
3584 ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSOUTER)
3585 add_tess_inout = true;
3586 }
3587 if (add_tesscoord || add_tess_inout)
3588 ctx.file_offset[TGSI_FILE_INPUT]++;
3589 if (add_tess_inout)
3590 ctx.file_offset[TGSI_FILE_INPUT]+=2;
3591 }
3592 if (ctx.type == PIPE_SHADER_COMPUTE) {
3593 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3594 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3595 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_GRID_SIZE)
3596 ctx.cs_grid_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3597 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_BLOCK_SIZE)
3598 ctx.cs_block_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3599 }
3600 }
3601
3602 ctx.file_offset[TGSI_FILE_OUTPUT] =
3603 ctx.file_offset[TGSI_FILE_INPUT] +
3604 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3605 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
3606 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
3607
3608 /* Outside the GPR range. This will be translated to one of the
3609 * kcache banks later. */
3610 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
3611 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
3612
3613 pipeshader->scratch_space_needed = 0;
3614 int regno = ctx.file_offset[TGSI_FILE_TEMPORARY] +
3615 ctx.info.file_max[TGSI_FILE_TEMPORARY];
3616 if (regno > 124) {
3617 choose_spill_arrays(&ctx, &regno, &pipeshader->scratch_space_needed);
3618 shader->indirect_files = ctx.info.indirect_files;
3619 }
3620 shader->needs_scratch_space = pipeshader->scratch_space_needed != 0;
3621
3622 ctx.bc->ar_reg = ++regno;
3623 ctx.bc->index_reg[0] = ++regno;
3624 ctx.bc->index_reg[1] = ++regno;
3625
3626 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3627 ctx.tess_input_info = ++regno;
3628 ctx.tess_output_info = ++regno;
3629 } else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3630 ctx.tess_input_info = ++regno;
3631 ctx.tess_output_info = ++regno;
3632 } else if (ctx.type == PIPE_SHADER_GEOMETRY) {
3633 ctx.gs_export_gpr_tregs[0] = ++regno;
3634 ctx.gs_export_gpr_tregs[1] = ++regno;
3635 ctx.gs_export_gpr_tregs[2] = ++regno;
3636 ctx.gs_export_gpr_tregs[3] = ++regno;
3637 if (ctx.shader->gs_tri_strip_adj_fix) {
3638 ctx.gs_rotated_input[0] = ++regno;
3639 ctx.gs_rotated_input[1] = ++regno;
3640 } else {
3641 ctx.gs_rotated_input[0] = 0;
3642 ctx.gs_rotated_input[1] = 1;
3643 }
3644 }
3645
3646 if (shader->uses_images) {
3647 ctx.thread_id_gpr = ++regno;
3648 }
3649 ctx.temp_reg = ++regno;
3650
3651 shader->max_arrays = 0;
3652 shader->num_arrays = 0;
3653 if (indirect_gprs) {
3654
3655 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
3656 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
3657 ctx.file_offset[TGSI_FILE_OUTPUT] -
3658 ctx.file_offset[TGSI_FILE_INPUT],
3659 0x0F);
3660 }
3661 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
3662 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
3663 ctx.file_offset[TGSI_FILE_TEMPORARY] -
3664 ctx.file_offset[TGSI_FILE_OUTPUT],
3665 0x0F);
3666 }
3667 }
3668
3669 ctx.nliterals = 0;
3670 ctx.literals = NULL;
3671 ctx.max_driver_temp_used = 0;
3672
3673 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
3674 ctx.info.colors_written == 1;
3675 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
3676 shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
3677
3678 if (ctx.type == PIPE_SHADER_VERTEX ||
3679 ctx.type == PIPE_SHADER_GEOMETRY ||
3680 ctx.type == PIPE_SHADER_TESS_EVAL) {
3681 shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] +
3682 ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1;
3683 shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1;
3684 shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED];
3685 }
3686
3687 if (shader->vs_as_gs_a)
3688 vs_add_primid_output(&ctx, key.vs.prim_id_out);
3689
3690 if (ctx.thread_id_gpr != -1) {
3691 r = load_thread_id_gpr(&ctx);
3692 if (r)
3693 return r;
3694 }
3695
3696 if (ctx.type == PIPE_SHADER_TESS_EVAL)
3697 r600_fetch_tess_io_info(&ctx);
3698
3699 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3700 tgsi_parse_token(&ctx.parse);
3701 switch (ctx.parse.FullToken.Token.Type) {
3702 case TGSI_TOKEN_TYPE_IMMEDIATE:
3703 immediate = &ctx.parse.FullToken.FullImmediate;
3704 uint32_t *new_literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
3705 if (new_literals == NULL) {
3706 r = -ENOMEM;
3707 goto out_err;
3708 }
ctx.literals = new_literals; /* only commit on success so out_err can free the old buffer */
3709 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
3710 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
3711 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
3712 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
3713 ctx.nliterals++;
3714 break;
3715 case TGSI_TOKEN_TYPE_DECLARATION:
3716 r = tgsi_declaration(&ctx);
3717 if (r)
3718 goto out_err;
3719 break;
3720 case TGSI_TOKEN_TYPE_INSTRUCTION:
3721 case TGSI_TOKEN_TYPE_PROPERTY:
3722 break;
3723 default:
3724 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
3725 r = -EINVAL;
3726 goto out_err;
3727 }
3728 }
3729
3730 shader->ring_item_sizes[0] = ctx.next_ring_offset;
3731 shader->ring_item_sizes[1] = 0;
3732 shader->ring_item_sizes[2] = 0;
3733 shader->ring_item_sizes[3] = 0;
3734
3735 /* Process two-sided color if needed */
3736 if (shader->two_side && ctx.colors_used) {
3737 int i, count = ctx.shader->ninput;
3738 unsigned next_lds_loc = ctx.shader->nlds;
3739
3740 /* The additional inputs are allocated right after the existing ones.
3741 * They are only needed until color selection is done, so there is no
3742 * need to reserve these GPRs for the rest of the shader code or to
3743 * adjust output offsets etc. */
3744 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
3745 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3746
3747 /* if two-sided and neither the face nor the sample mask is used by the shader, ensure face_gpr is allocated */
3748 if (ctx.face_gpr == -1) {
3749 i = ctx.shader->ninput++;
3750 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
3751 ctx.shader->input[i].spi_sid = 0;
3752 ctx.shader->input[i].gpr = gpr++;
3753 ctx.face_gpr = ctx.shader->input[i].gpr;
3754 }
3755
3756 for (i = 0; i < count; i++) {
3757 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
3758 int ni = ctx.shader->ninput++;
3759 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
3760 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
3761 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
3762 ctx.shader->input[ni].gpr = gpr++;
3763 /* TGSI to LLVM needs to know the lds position of inputs.
3764 * The non-LLVM path computes it later (in process_twoside_color). */
3765 ctx.shader->input[ni].lds_pos = next_lds_loc++;
3766 ctx.shader->input[i].back_color_input = ni;
3767 if (ctx.bc->chip_class >= EVERGREEN) {
3768 if ((r = evergreen_interp_input(&ctx, ni)))
3769 return r;
3770 }
3771 }
3772 }
3773 }
3774
3775 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
3776 shader->nr_ps_max_color_exports = 8;
3777
3778 if (ctx.shader->uses_helper_invocation) {
3779 if (ctx.bc->chip_class == CAYMAN)
3780 r = cm_load_helper_invocation(&ctx);
3781 else
3782 r = eg_load_helper_invocation(&ctx);
3783 if (r)
3784 return r;
3785 }
3786
3787 /*
3788 * XXX this relies on fixed_pt_position_gpr only being present when
3789 * this shader should be executed per sample. Should be the case for now...
3790 */
3791 if (ctx.fixed_pt_position_gpr != -1 && ctx.info.reads_samplemask) {
3792 /*
3793 * Fix up the sample mask. The hw always gives us the coverage mask
3794 * for the whole pixel. However, for per-sample shading we need the
3795 * coverage for the current shader invocation only.
3796 * Also, with msaa disabled only the first bit should be set
3797 * (luckily the same fixup works for both problems).
3798 * For now we can only do this if we know the shader is always
3799 * executed per sample (because it uses inputs that force
3800 * per-sample execution).
3801 * If the fb is not multisampled we do unnecessary work, but the
3802 * result is still correct.
3803 * It does nothing, however, for sample shading requested via
3804 * MinSampleShading.
3805 */
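/* A sketch of the fixup emitted below, in GLSL-like terms:
 * sample_mask = sample_mask & (1 << sample_id);
 * e.g. coverage 0b1111 with sample id 2 becomes 0b0100. The id comes
 * from fixed_pt_position_gpr.w and the mask is rewritten in place in
 * face_gpr.z by the LSHL + AND pair. */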
3806 struct r600_bytecode_alu alu;
3807 int tmp = r600_get_temp(&ctx);
3808 assert(ctx.face_gpr != -1);
3809 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3810
3811 alu.op = ALU_OP2_LSHL_INT;
3812 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3813 alu.src[0].value = 0x1;
3814 alu.src[1].sel = ctx.fixed_pt_position_gpr;
3815 alu.src[1].chan = 3;
3816 alu.dst.sel = tmp;
3817 alu.dst.chan = 0;
3818 alu.dst.write = 1;
3819 alu.last = 1;
3820 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3821 return r;
3822
3823 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3824 alu.op = ALU_OP2_AND_INT;
3825 alu.src[0].sel = tmp;
3826 alu.src[1].sel = ctx.face_gpr;
3827 alu.src[1].chan = 2;
3828 alu.dst.sel = ctx.face_gpr;
3829 alu.dst.chan = 2;
3830 alu.dst.write = 1;
3831 alu.last = 1;
3832 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3833 return r;
3834 }
3835
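/* gl_FragCoord.w must be the reciprocal of the interpolated window w
 * that arrives in channel 3 (presumably; that is what the RECIP_IEEE
 * below implies), so it is converted in place before the shader body
 * runs. */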
3836 if (ctx.fragcoord_input >= 0) {
3837 if (ctx.bc->chip_class == CAYMAN) {
3838 for (j = 0 ; j < 4; j++) {
3839 struct r600_bytecode_alu alu;
3840 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3841 alu.op = ALU_OP1_RECIP_IEEE;
3842 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3843 alu.src[0].chan = 3;
3844
3845 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3846 alu.dst.chan = j;
3847 alu.dst.write = (j == 3);
3848 alu.last = (j == 3);
3849 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3850 return r;
3851 }
3852 } else {
3853 struct r600_bytecode_alu alu;
3854 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3855 alu.op = ALU_OP1_RECIP_IEEE;
3856 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3857 alu.src[0].chan = 3;
3858
3859 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3860 alu.dst.chan = 3;
3861 alu.dst.write = 1;
3862 alu.last = 1;
3863 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3864 return r;
3865 }
3866 }
3867
3868 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3869 struct r600_bytecode_alu alu;
3870 int r;
3871
3872 /* Workaround for GS threads with no output: emit a cut at the start of the GS */
3873 if (ctx.bc->chip_class == R600)
3874 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CUT_VERTEX);
3875
3876 for (j = 0; j < 4; j++) {
3877 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3878 alu.op = ALU_OP1_MOV;
3879 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3880 alu.src[0].value = 0;
3881 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
3882 alu.dst.write = 1;
3883 alu.last = 1;
3884 r = r600_bytecode_add_alu(ctx.bc, &alu);
3885 if (r)
3886 return r;
3887 }
3888
3889 if (ctx.shader->gs_tri_strip_adj_fix) {
3890 r = single_alu_op2(&ctx, ALU_OP2_AND_INT,
3891 ctx.gs_rotated_input[0], 2,
3892 0, 2,
3893 V_SQ_ALU_SRC_LITERAL, 1);
3894 if (r)
3895 return r;
3896
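/* Rotation sketch: for tri strips with adjacency the order of the six
 * per-vertex offsets differs between even and odd primitives, so
 * offset slot i may need to be replaced by slot (i + 4) % 6. The AND
 * above stores the parity bit (low bit of r0.z) in
 * gs_rotated_input[0].z, and the CNDE_INT in the loop below picks the
 * original or the rotated offset for each of the six slots. */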
3897 for (i = 0; i < 6; i++) {
3898 int rotated = (i + 4) % 6;
3899 int offset_reg = i / 3;
3900 int offset_chan = i % 3;
3901 int rotated_offset_reg = rotated / 3;
3902 int rotated_offset_chan = rotated % 3;
3903
3904 if (offset_reg == 0 && offset_chan == 2)
3905 offset_chan = 3;
3906 if (rotated_offset_reg == 0 && rotated_offset_chan == 2)
3907 rotated_offset_chan = 3;
3908
3909 r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT,
3910 ctx.gs_rotated_input[offset_reg], offset_chan,
3911 ctx.gs_rotated_input[0], 2,
3912 offset_reg, offset_chan,
3913 rotated_offset_reg, rotated_offset_chan);
3914 if (r)
3915 return r;
3916 }
3917 }
3918 }
3919
3920 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3921 r600_fetch_tess_io_info(&ctx);
3922
3923 if (shader->two_side && ctx.colors_used) {
3924 if ((r = process_twoside_color_inputs(&ctx)))
3925 return r;
3926 }
3927
3928 tgsi_parse_init(&ctx.parse, tokens);
3929 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3930 tgsi_parse_token(&ctx.parse);
3931 switch (ctx.parse.FullToken.Token.Type) {
3932 case TGSI_TOKEN_TYPE_INSTRUCTION:
3933 r = tgsi_is_supported(&ctx);
3934 if (r)
3935 goto out_err;
3936 ctx.max_driver_temp_used = 0;
3937 /* reserve first tmp for everyone */
3938 r600_get_temp(&ctx);
3939
3940 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
3941 if ((r = tgsi_split_constant(&ctx)))
3942 goto out_err;
3943 if ((r = tgsi_split_literal_constant(&ctx)))
3944 goto out_err;
3945 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3946 if ((r = tgsi_split_gs_inputs(&ctx)))
3947 goto out_err;
3948 } else if (lds_inputs) {
3949 if ((r = tgsi_split_lds_inputs(&ctx)))
3950 goto out_err;
3951 }
3952 if (ctx.bc->chip_class == CAYMAN)
3953 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
3954 else if (ctx.bc->chip_class >= EVERGREEN)
3955 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
3956 else
3957 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
3958
3959 ctx.bc->precise |= ctx.parse.FullToken.FullInstruction.Instruction.Precise;
3960
3961 r = ctx.inst_info->process(&ctx);
3962 if (r)
3963 goto out_err;
3964
3965 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3966 r = r600_store_tcs_output(&ctx);
3967 if (r)
3968 goto out_err;
3969 }
3970 break;
3971 default:
3972 break;
3973 }
3974 }
3975
3976 /* Reset the temporary register counter. */
3977 ctx.max_driver_temp_used = 0;
3978
3979 noutput = shader->noutput;
3980
3981 if (!ring_outputs && ctx.clip_vertex_write) {
3982 unsigned clipdist_temp[2];
3983
3984 clipdist_temp[0] = r600_get_temp(&ctx);
3985 clipdist_temp[1] = r600_get_temp(&ctx);
3986
3987 /* a clipvertex write needs to be converted into clipdistance writes,
3988 * and the clip vertex itself is no longer exported */
3989
3990 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
3991 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3992 shader->output[noutput].gpr = clipdist_temp[0];
3993 noutput++;
3994 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3995 shader->output[noutput].gpr = clipdist_temp[1];
3996 noutput++;
3997
3998 /* reset spi_sid for clipvertex output to avoid confusing spi */
3999 shader->output[ctx.cv_output].spi_sid = 0;
4000
4001 shader->clip_dist_write = 0xFF;
4002 shader->cc_dist_mask = 0xFF;
4003
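/* Each clip distance is a plane equation evaluated at the clip vertex:
 * dist[i] = dot(clipvertex, user_clip_plane[i]).
 * The eight plane vectors are fetched from R600_BUFFER_INFO_CONST_BUFFER
 * via kcache selectors 512+0 .. 512+7, and the DOT4 results are packed
 * into the two temp GPRs (i >> 2 selects the register, i & 3 the
 * channel). */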
4004 for (i = 0; i < 8; i++) {
4005 int oreg = i >> 2;
4006 int ochan = i & 3;
4007
4008 for (j = 0; j < 4; j++) {
4009 struct r600_bytecode_alu alu;
4010 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4011 alu.op = ALU_OP2_DOT4;
4012 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
4013 alu.src[0].chan = j;
4014
4015 alu.src[1].sel = 512 + i;
4016 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
4017 alu.src[1].chan = j;
4018
4019 alu.dst.sel = clipdist_temp[oreg];
4020 alu.dst.chan = j;
4021 alu.dst.write = (j == ochan);
4022 if (j == 3)
4023 alu.last = 1;
4024 r = r600_bytecode_add_alu(ctx.bc, &alu);
4025 if (r)
4026 return r;
4027 }
4028 }
4029 }
4030
4031 /* Add stream outputs. */
4032 if (so.num_outputs) {
4033 bool emit = false;
4034 if (!lds_outputs && !ring_outputs && ctx.type == PIPE_SHADER_VERTEX)
4035 emit = true;
4036 if (!ring_outputs && ctx.type == PIPE_SHADER_TESS_EVAL)
4037 emit = true;
4038 if (emit)
4039 emit_streamout(&ctx, &so, -1, NULL);
4040 }
4041 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
4042 convert_edgeflag_to_int(&ctx);
4043
4044 if (ctx.type == PIPE_SHADER_TESS_CTRL)
4045 r600_emit_tess_factor(&ctx);
4046
4047 if (lds_outputs) {
4048 if (ctx.type == PIPE_SHADER_VERTEX) {
4049 if (ctx.shader->noutput)
4050 emit_lds_vs_writes(&ctx);
4051 }
4052 } else if (ring_outputs) {
4053 if (shader->vs_as_es || shader->tes_as_es) {
4054 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
4055 ctx.gs_export_gpr_tregs[1] = -1;
4056 ctx.gs_export_gpr_tregs[2] = -1;
4057 ctx.gs_export_gpr_tregs[3] = -1;
4058
4059 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
4060 }
4061 } else {
4062 /* Export outputs */
4063 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
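/* POS export slot map used below (inferred from the array_base values):
 * 60 = position, 61 = the misc vector packing psize.x / edgeflag.y /
 * layer.z / viewport.w, and clip/cull distances start at 61 or 62
 * depending on whether the misc vector is written. An output whose
 * type is still the 0xffffffff sentinel after the switch is exported
 * as a PARAM at the end of the loop. */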
4064
4065 for (i = 0, j = 0; i < noutput; i++, j++) {
4066 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4067 output[j].gpr = shader->output[i].gpr;
4068 output[j].elem_size = 3;
4069 output[j].swizzle_x = 0;
4070 output[j].swizzle_y = 1;
4071 output[j].swizzle_z = 2;
4072 output[j].swizzle_w = 3;
4073 output[j].burst_count = 1;
4074 output[j].type = 0xffffffff;
4075 output[j].op = CF_OP_EXPORT;
4076 switch (ctx.type) {
4077 case PIPE_SHADER_VERTEX:
4078 case PIPE_SHADER_TESS_EVAL:
4079 switch (shader->output[i].name) {
4080 case TGSI_SEMANTIC_POSITION:
4081 output[j].array_base = 60;
4082 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4083 pos_emitted = true;
4084 break;
4085
4086 case TGSI_SEMANTIC_PSIZE:
4087 output[j].array_base = 61;
4088 output[j].swizzle_y = 7;
4089 output[j].swizzle_z = 7;
4090 output[j].swizzle_w = 7;
4091 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4092 pos_emitted = true;
4093 break;
4094 case TGSI_SEMANTIC_EDGEFLAG:
4095 output[j].array_base = 61;
4096 output[j].swizzle_x = 7;
4097 output[j].swizzle_y = 0;
4098 output[j].swizzle_z = 7;
4099 output[j].swizzle_w = 7;
4100 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4101 pos_emitted = true;
4102 break;
4103 case TGSI_SEMANTIC_LAYER:
4104 /* spi_sid is 0 for outputs that are
4105 * not consumed by PS */
4106 if (shader->output[i].spi_sid) {
4107 output[j].array_base = next_param_base++;
4108 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4109 j++;
4110 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4111 }
4112 output[j].array_base = 61;
4113 output[j].swizzle_x = 7;
4114 output[j].swizzle_y = 7;
4115 output[j].swizzle_z = 0;
4116 output[j].swizzle_w = 7;
4117 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4118 pos_emitted = true;
4119 break;
4120 case TGSI_SEMANTIC_VIEWPORT_INDEX:
4121 /* spi_sid is 0 for outputs that are
4122 * not consumed by PS */
4123 if (shader->output[i].spi_sid) {
4124 output[j].array_base = next_param_base++;
4125 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4126 j++;
4127 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4128 }
4129 output[j].array_base = 61;
4130 output[j].swizzle_x = 7;
4131 output[j].swizzle_y = 7;
4132 output[j].swizzle_z = 7;
4133 output[j].swizzle_w = 0;
4134 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4135 pos_emitted = true;
4136 break;
4137 case TGSI_SEMANTIC_CLIPVERTEX:
4138 j--;
4139 break;
4140 case TGSI_SEMANTIC_CLIPDIST:
4141 output[j].array_base = next_clip_base++;
4142 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4143 pos_emitted = true;
4144 /* spi_sid is 0 for clipdistance outputs that were generated
4145 * for clipvertex - we don't need to pass them to PS */
4146 if (shader->output[i].spi_sid) {
4147 j++;
4148 /* duplicate it as PARAM to pass to the pixel shader */
4149 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4150 output[j].array_base = next_param_base++;
4151 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4152 }
4153 break;
4154 case TGSI_SEMANTIC_FOG:
4155 output[j].swizzle_y = 4; /* 0 */
4156 output[j].swizzle_z = 4; /* 0 */
4157 output[j].swizzle_w = 5; /* 1 */
4158 break;
4159 case TGSI_SEMANTIC_PRIMID:
4160 output[j].swizzle_x = 2;
4161 output[j].swizzle_y = 4; /* 0 */
4162 output[j].swizzle_z = 4; /* 0 */
4163 output[j].swizzle_w = 4; /* 0 */
4164 break;
4165 }
4166
4167 break;
4168 case PIPE_SHADER_FRAGMENT:
4169 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
4170 /* never export more colors than the number of CBs */
4171 if (shader->output[i].sid >= max_color_exports) {
4172 /* skip export */
4173 j--;
4174 continue;
4175 }
4176 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
4177 output[j].array_base = shader->output[i].sid;
4178 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4179 shader->nr_ps_color_exports++;
4180 shader->ps_color_export_mask |= (0xf << (shader->output[i].sid * 4));
4181
4182 /* If the i-th target format is set, all previous target formats must
4183 * be non-zero to avoid hangs. - from radeonsi, seems to apply to eg as well.
4184 */
4185 if (shader->output[i].sid > 0)
4186 for (unsigned x = 0; x < shader->output[i].sid; x++)
4187 shader->ps_color_export_mask |= (1 << (x*4));
4188
4189 if (shader->output[i].sid > shader->ps_export_highest)
4190 shader->ps_export_highest = shader->output[i].sid;
4191 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
4192 for (k = 1; k < max_color_exports; k++) {
4193 j++;
4194 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4195 output[j].gpr = shader->output[i].gpr;
4196 output[j].elem_size = 3;
4197 output[j].swizzle_x = 0;
4198 output[j].swizzle_y = 1;
4199 output[j].swizzle_z = 2;
4200 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
4201 output[j].burst_count = 1;
4202 output[j].array_base = k;
4203 output[j].op = CF_OP_EXPORT;
4204 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4205 shader->nr_ps_color_exports++;
4206 if (k > shader->ps_export_highest)
4207 shader->ps_export_highest = k;
4208 shader->ps_color_export_mask |= (0xf << (k * 4)); /* nibble k: the CB index (array_base), not the output slot j */
4209 }
4210 }
4211 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
4212 output[j].array_base = 61;
4213 output[j].swizzle_x = 2;
4214 output[j].swizzle_y = 7;
4215 output[j].swizzle_z = output[j].swizzle_w = 7;
4216 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4217 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
4218 output[j].array_base = 61;
4219 output[j].swizzle_x = 7;
4220 output[j].swizzle_y = 1;
4221 output[j].swizzle_z = output[j].swizzle_w = 7;
4222 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4223 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
4224 output[j].array_base = 61;
4225 output[j].swizzle_x = 7;
4226 output[j].swizzle_y = 7;
4227 output[j].swizzle_z = 0;
4228 output[j].swizzle_w = 7;
4229 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4230 } else {
4231 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
4232 r = -EINVAL;
4233 goto out_err;
4234 }
4235 break;
4236 case PIPE_SHADER_TESS_CTRL:
4237 break;
4238 default:
4239 R600_ERR("unsupported processor type %d\n", ctx.type);
4240 r = -EINVAL;
4241 goto out_err;
4242 }
4243
4244 if (output[j].type == 0xffffffff) {
4245 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4246 output[j].array_base = next_param_base++;
4247 }
4248 }
4249
4250 /* add fake position export */
4251 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && pos_emitted == false) {
4252 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4253 output[j].gpr = 0;
4254 output[j].elem_size = 3;
4255 output[j].swizzle_x = 7;
4256 output[j].swizzle_y = 7;
4257 output[j].swizzle_z = 7;
4258 output[j].swizzle_w = 7;
4259 output[j].burst_count = 1;
4260 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4261 output[j].array_base = 60;
4262 output[j].op = CF_OP_EXPORT;
4263 j++;
4264 }
4265
4266 /* add fake param output for vertex shader if no param is exported */
4267 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && next_param_base == 0) {
4268 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4269 output[j].gpr = 0;
4270 output[j].elem_size = 3;
4271 output[j].swizzle_x = 7;
4272 output[j].swizzle_y = 7;
4273 output[j].swizzle_z = 7;
4274 output[j].swizzle_w = 7;
4275 output[j].burst_count = 1;
4276 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4277 output[j].array_base = 0;
4278 output[j].op = CF_OP_EXPORT;
4279 j++;
4280 }
4281
4282 /* add fake pixel export */
4283 if (ctx.type == PIPE_SHADER_FRAGMENT && shader->nr_ps_color_exports == 0) {
4284 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4285 output[j].gpr = 0;
4286 output[j].elem_size = 3;
4287 output[j].swizzle_x = 7;
4288 output[j].swizzle_y = 7;
4289 output[j].swizzle_z = 7;
4290 output[j].swizzle_w = 7;
4291 output[j].burst_count = 1;
4292 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4293 output[j].array_base = 0;
4294 output[j].op = CF_OP_EXPORT;
4295 j++;
4296 shader->nr_ps_color_exports++;
4297 shader->ps_color_export_mask = 0xf;
4298 }
4299
4300 noutput = j;
4301
4302 /* set export done on last export of each type */
4303 for (k = noutput - 1, output_done = 0; k >= 0; k--) {
4304 if (!(output_done & (1 << output[k].type))) {
4305 output_done |= (1 << output[k].type);
4306 output[k].op = CF_OP_EXPORT_DONE;
4307 }
4308 }
4309 /* add output to bytecode */
4310 for (i = 0; i < noutput; i++) {
4311 r = r600_bytecode_add_output(ctx.bc, &output[i]);
4312 if (r)
4313 goto out_err;
4314 }
4315 }
4316
4317 /* add program end */
4318 if (ctx.bc->chip_class == CAYMAN)
4319 cm_bytecode_add_cf_end(ctx.bc);
4320 else {
4321 const struct cf_op_info *last = NULL;
4322
4323 if (ctx.bc->cf_last)
4324 last = r600_isa_cf(ctx.bc->cf_last->op);
4325
4326 /* alu clause instructions don't have EOP bit, so add NOP */
4327 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_POP)
4328 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
4329
4330 ctx.bc->cf_last->end_of_program = 1;
4331 }
4332
4333 /* check GPR limit - we have 124 = 128 - 4
4334 * (4 are reserved as alu clause temporary registers) */
4335 if (ctx.bc->ngpr > 124) {
4336 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
4337 r = -ENOMEM;
4338 goto out_err;
4339 }
4340
4341 if (ctx.type == PIPE_SHADER_GEOMETRY) {
4342 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
4343 return r;
4344 }
4345
4346 free(ctx.spilled_arrays);
4347 free(ctx.array_infos);
4348 free(ctx.literals);
4349 tgsi_parse_free(&ctx.parse);
4350 return 0;
4351 out_err:
4352 free(ctx.spilled_arrays);
4353 free(ctx.array_infos);
4354 free(ctx.literals);
4355 tgsi_parse_free(&ctx.parse);
4356 return r;
4357 }
4358
4359 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
4360 {
4361 const unsigned tgsi_opcode =
4362 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
4363 R600_ERR("%s tgsi opcode unsupported\n",
4364 tgsi_get_opcode_name(tgsi_opcode));
4365 return -EINVAL;
4366 }
4367
4368 static int tgsi_end(struct r600_shader_ctx *ctx UNUSED)
4369 {
4370 return 0;
4371 }
4372
4373 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
4374 const struct r600_shader_src *shader_src,
4375 unsigned chan)
4376 {
4377 bc_src->sel = shader_src->sel;
4378 bc_src->chan = shader_src->swizzle[chan];
4379 bc_src->neg = shader_src->neg;
4380 bc_src->abs = shader_src->abs;
4381 bc_src->rel = shader_src->rel;
4382 bc_src->value = shader_src->value[bc_src->chan];
4383 bc_src->kc_bank = shader_src->kc_bank;
4384 bc_src->kc_rel = shader_src->kc_rel;
4385 }
4386
4387 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
4388 {
4389 bc_src->abs = 1;
4390 bc_src->neg = 0;
4391 }
4392
4393 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
4394 {
4395 bc_src->neg = !bc_src->neg;
4396 }
4397
4398 static void tgsi_dst(struct r600_shader_ctx *ctx,
4399 const struct tgsi_full_dst_register *tgsi_dst,
4400 unsigned swizzle,
4401 struct r600_bytecode_alu_dst *r600_dst)
4402 {
4403 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4404
4405 if (tgsi_dst->Register.File == TGSI_FILE_TEMPORARY) {
4406 bool spilled;
4407 unsigned idx;
4408
4409 idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_dst->Register.Index, &spilled);
4410
4411 if (spilled) {
4412 struct r600_bytecode_output cf;
4413 int reg = 0;
4414 int r;
4415 bool add_pending_output = true;
4416
4417 memset(&cf, 0, sizeof(struct r600_bytecode_output));
4418 get_spilled_array_base_and_size(ctx, tgsi_dst->Register.Index,
4419 &cf.array_base, &cf.array_size);
4420
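/* Spilled-array write path, roughly: the ALU result is redirected to a
 * temp GPR, and a MEM_SCRATCH export describing the array slot is
 * queued as a pending output; pending outputs are flushed after the
 * current instruction group, with a wait-ack on r700+ chips. */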
4421 /* If no component has spilled yet, reserve a register and add the spill
4422 * code. ctx->bc->n_pending_outputs is cleared after each instruction group. */
4423 if (ctx->bc->n_pending_outputs == 0) {
4424 reg = r600_get_temp(ctx);
4425 } else {
4426 /* If we are already spilling and the output address is the same as
4427 * before, just reuse the same slot */
4428 struct r600_bytecode_output *tmpl = &ctx->bc->pending_outputs[ctx->bc->n_pending_outputs-1];
4429 if ((cf.array_base + idx == tmpl->array_base) ||
4430 (cf.array_base == tmpl->array_base &&
4431 tmpl->index_gpr == ctx->bc->ar_reg &&
4432 tgsi_dst->Register.Indirect)) {
4433 reg = ctx->bc->pending_outputs[0].gpr;
4434 add_pending_output = false;
4435 } else {
4436 reg = r600_get_temp(ctx);
4437 }
4438 }
4439
4440 r600_dst->sel = reg;
4441 r600_dst->chan = swizzle;
4442 r600_dst->write = 1;
4443 if (inst->Instruction.Saturate) {
4444 r600_dst->clamp = 1;
4445 }
4446
4447 /* Add new outputs as pending */
4448 if (add_pending_output) {
4449 cf.op = CF_OP_MEM_SCRATCH;
4450 cf.elem_size = 3;
4451 cf.gpr = reg;
4452 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
4453 cf.mark = 1;
4454 cf.comp_mask = inst->Dst[0].Register.WriteMask;
4455 cf.swizzle_x = 0;
4456 cf.swizzle_y = 1;
4457 cf.swizzle_z = 2;
4458 cf.swizzle_w = 3;
4459 cf.burst_count = 1;
4460
4461 if (tgsi_dst->Register.Indirect) {
4462 if (ctx->bc->chip_class < R700)
4463 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
4464 else
4465 cf.type = 3; /* V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND_ACK */
4466 cf.index_gpr = ctx->bc->ar_reg;
4467 }
4468 else {
4469 cf.array_base += idx;
4470 cf.array_size = 0;
4471 }
4472
4473 r = r600_bytecode_add_pending_output(ctx->bc, &cf);
4474 if (r)
4475 return;
4476
4477 if (ctx->bc->chip_class >= R700)
4478 r600_bytecode_need_wait_ack(ctx->bc, true);
4479 }
4480 return;
4481 }
4482 else {
4483 r600_dst->sel = idx;
4484 }
4485 }
4486 else {
4487 r600_dst->sel = tgsi_dst->Register.Index;
4488 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
4489 }
4490 r600_dst->chan = swizzle;
4491 r600_dst->write = 1;
4492 if (inst->Instruction.Saturate) {
4493 r600_dst->clamp = 1;
4494 }
4495 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
4496 if (tgsi_dst->Register.File == TGSI_FILE_OUTPUT) {
4497 return;
4498 }
4499 }
4500 if (tgsi_dst->Register.Indirect)
4501 r600_dst->rel = V_SQ_REL_RELATIVE;
4502
4503 }
4504
4505 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap, int dest_temp, int op_override)
4506 {
4507 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4508 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4509 struct r600_bytecode_alu alu;
4510 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4511 int use_tmp = 0;
4512 int swizzle_x = inst->Src[0].Register.SwizzleX;
4513
4514 if (singledest) {
4515 switch (write_mask) {
4516 case 0x1:
4517 if (swizzle_x == 2) {
4518 write_mask = 0xc;
4519 use_tmp = 3;
4520 } else
4521 write_mask = 0x3;
4522 break;
4523 case 0x2:
4524 if (swizzle_x == 2) {
4525 write_mask = 0xc;
4526 use_tmp = 3;
4527 } else {
4528 write_mask = 0x3;
4529 use_tmp = 1;
4530 }
4531 break;
4532 case 0x4:
4533 if (swizzle_x == 0) {
4534 write_mask = 0x3;
4535 use_tmp = 1;
4536 } else
4537 write_mask = 0xc;
4538 break;
4539 case 0x8:
4540 if (swizzle_x == 0) {
4541 write_mask = 0x3;
4542 use_tmp = 1;
4543 } else {
4544 write_mask = 0xc;
4545 use_tmp = 3;
4546 }
4547 break;
4548 }
4549 }
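/* The remap above, roughly: the 32-bit channel pair actually computed
 * follows the source swizzle (xy when the double source starts at x,
 * zw when it starts at z). When that pair cannot be written straight
 * to the destination channel, the result goes through ctx->temp_reg
 * and use_tmp - 1 is the temp channel holding it, copied out by the
 * MOV loop at the bottom of this function. */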
4550
4551 lasti = tgsi_last_instruction(write_mask);
4552 for (i = 0; i <= lasti; i++) {
4553
4554 if (!(write_mask & (1 << i)))
4555 continue;
4556
4557 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4558
4559 if (singledest) {
4560 if (use_tmp || dest_temp) {
4561 alu.dst.sel = use_tmp ? ctx->temp_reg : dest_temp;
4562 alu.dst.chan = i;
4563 alu.dst.write = 1;
4564 } else {
4565 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4566 }
4567 if (i == 1 || i == 3)
4568 alu.dst.write = 0;
4569 } else
4570 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4571
4572 alu.op = op_override ? op_override : ctx->inst_info->op;
4573 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
4574 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4575 } else if (!swap) {
4576 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4577 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4578 }
4579 } else {
4580 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
4581 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
4582 }
4583
4584 /* handle special cases: abs of a double is applied to the high (sign-carrying) half */
4585 if (i == 1 || i == 3) {
4586 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
4587 case TGSI_OPCODE_DABS:
4588 r600_bytecode_src_set_abs(&alu.src[0]);
4589 break;
4590 default:
4591 break;
4592 }
4593 }
4594 if (i == lasti) {
4595 alu.last = 1;
4596 }
4597 r = r600_bytecode_add_alu(ctx->bc, &alu);
4598 if (r)
4599 return r;
4600 }
4601
4602 if (use_tmp) {
4603 write_mask = inst->Dst[0].Register.WriteMask;
4604
4605 lasti = tgsi_last_instruction(write_mask);
4606 /* move result from temp to dst */
4607 for (i = 0; i <= lasti; i++) {
4608 if (!(write_mask & (1 << i)))
4609 continue;
4610
4611 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4612 alu.op = ALU_OP1_MOV;
4613
4614 if (dest_temp) {
4615 alu.dst.sel = dest_temp;
4616 alu.dst.chan = i;
4617 alu.dst.write = 1;
4618 } else
4619 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4620 alu.src[0].sel = ctx->temp_reg;
4621 alu.src[0].chan = use_tmp - 1;
4622 alu.last = (i == lasti);
4623
4624 r = r600_bytecode_add_alu(ctx->bc, &alu);
4625 if (r)
4626 return r;
4627 }
4628 }
4629 return 0;
4630 }
4631
4632 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
4633 {
4634 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4635 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4636 /* confirm the write mask: 64-bit ops must write whole channel pairs */
4637 if ((write_mask & 0x3) != 0x3 &&
4638 (write_mask & 0xc) != 0xc) {
4639 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
4640 return -1;
4641 }
4642 return tgsi_op2_64_params(ctx, false, false, 0, 0);
4643 }
4644
4645 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
4646 {
4647 return tgsi_op2_64_params(ctx, true, false, 0, 0);
4648 }
4649
4650 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
4651 {
4652 return tgsi_op2_64_params(ctx, true, true, 0, 0);
4653 }
4654
4655 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
4656 {
4657 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4658 struct r600_bytecode_alu alu;
4659 int i, j, r;
4660 int lasti = 3;
4661 int tmp = r600_get_temp(ctx);
4662
4663 for (i = 0; i < lasti + 1; i++) {
4664
4665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4666 alu.op = ctx->inst_info->op;
4667 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4668 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
4669 }
4670
4671 if (inst->Dst[0].Register.WriteMask & (1 << i))
4672 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4673 else
4674 alu.dst.sel = tmp;
4675
4676 alu.dst.chan = i;
4677 alu.is_op3 = 1;
4678 if (i == lasti) {
4679 alu.last = 1;
4680 }
4681 r = r600_bytecode_add_alu(ctx->bc, &alu);
4682 if (r)
4683 return r;
4684 }
4685 return 0;
4686 }
4687
4688 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
4689 {
4690 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4691 struct r600_bytecode_alu alu;
4692 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4693 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4694 /* use temp register if trans_only and more than one dst component */
4695 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
4696 unsigned op = ctx->inst_info->op;
4697
4698 if (op == ALU_OP2_MUL_IEEE &&
4699 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
4700 op = ALU_OP2_MUL;
4701
4702 for (i = 0; i <= lasti; i++) {
4703 if (!(write_mask & (1 << i)))
4704 continue;
4705
4706 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4707 if (use_tmp) {
4708 alu.dst.sel = ctx->temp_reg;
4709 alu.dst.chan = i;
4710 alu.dst.write = 1;
4711 } else
4712 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4713
4714 alu.op = op;
4715 if (!swap) {
4716 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4717 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
4718 }
4719 } else {
4720 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4721 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4722 }
4723 if (i == lasti || trans_only) {
4724 alu.last = 1;
4725 }
4726 r = r600_bytecode_add_alu(ctx->bc, &alu);
4727 if (r)
4728 return r;
4729 }
4730
4731 if (use_tmp) {
4732 /* move result from temp to dst */
4733 for (i = 0; i <= lasti; i++) {
4734 if (!(write_mask & (1 << i)))
4735 continue;
4736
4737 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4738 alu.op = ALU_OP1_MOV;
4739 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4740 alu.src[0].sel = ctx->temp_reg;
4741 alu.src[0].chan = i;
4742 alu.last = (i == lasti);
4743
4744 r = r600_bytecode_add_alu(ctx->bc, &alu);
4745 if (r)
4746 return r;
4747 }
4748 }
4749 return 0;
4750 }
4751
4752 static int tgsi_op2(struct r600_shader_ctx *ctx)
4753 {
4754 return tgsi_op2_s(ctx, 0, 0);
4755 }
4756
4757 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
4758 {
4759 return tgsi_op2_s(ctx, 1, 0);
4760 }
4761
4762 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
4763 {
4764 return tgsi_op2_s(ctx, 0, 1);
4765 }
4766
4767 static int tgsi_ineg(struct r600_shader_ctx *ctx)
4768 {
4769 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4770 struct r600_bytecode_alu alu;
4771 int i, r;
4772 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4773
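/* Integer negation is emitted as (0 - src): src0 is the inline zero
 * constant and src1 the operand, with the subtract op coming from
 * inst_info (presumably SUB_INT for INEG). */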
4774 for (i = 0; i < lasti + 1; i++) {
4775
4776 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4777 continue;
4778 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4779 alu.op = ctx->inst_info->op;
4780
4781 alu.src[0].sel = V_SQ_ALU_SRC_0;
4782
4783 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4784
4785 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4786
4787 if (i == lasti) {
4788 alu.last = 1;
4789 }
4790 r = r600_bytecode_add_alu(ctx->bc, &alu);
4791 if (r)
4792 return r;
4793 }
4794 return 0;
4795
4796 }
4797
4798 static int tgsi_dneg(struct r600_shader_ctx *ctx)
4799 {
4800 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4801 struct r600_bytecode_alu alu;
4802 int i, r;
4803 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4804
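/* For doubles, the IEEE sign bit lives in the high 32-bit word, which
 * is channel y (or w) of the pair, so only channels 1 and 3 get their
 * neg bit toggled; the low words are copied unchanged. */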
4805 for (i = 0; i < lasti + 1; i++) {
4806
4807 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4808 continue;
4809 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4810 alu.op = ALU_OP1_MOV;
4811
4812 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4813
4814 if (i == 1 || i == 3)
4815 r600_bytecode_src_toggle_neg(&alu.src[0]);
4816 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4817
4818 if (i == lasti) {
4819 alu.last = 1;
4820 }
4821 r = r600_bytecode_add_alu(ctx->bc, &alu);
4822 if (r)
4823 return r;
4824 }
4825 return 0;
4826
4827 }
4828
4829 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
4830 {
4831 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4832 struct r600_bytecode_alu alu;
4833 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4834 int i, j, r;
4835
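/* DFRACEXP is frexp(): dst0 receives the double significand and dst1
 * the integer exponent. Judging from the swizzles below, the hw op
 * leaves the exponent in channel 1 of the temp and the significand
 * words in channels 2/3. */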
4836 for (i = 0; i <= 3; i++) {
4837 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4838 alu.op = ctx->inst_info->op;
4839
4840 alu.dst.sel = ctx->temp_reg;
4841 alu.dst.chan = i;
4842 alu.dst.write = 1;
4843 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4844 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4845 }
4846
4847 if (i == 3)
4848 alu.last = 1;
4849
4850 r = r600_bytecode_add_alu(ctx->bc, &alu);
4851 if (r)
4852 return r;
4853 }
4854
4855 /* Replicate significand result across channels. */
4856 for (i = 0; i <= 3; i++) {
4857 if (!(write_mask & (1 << i)))
4858 continue;
4859
4860 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4861 alu.op = ALU_OP1_MOV;
4862 alu.src[0].chan = (i & 1) + 2;
4863 alu.src[0].sel = ctx->temp_reg;
4864
4865 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4866 alu.dst.write = 1;
4867 alu.last = 1;
4868 r = r600_bytecode_add_alu(ctx->bc, &alu);
4869 if (r)
4870 return r;
4871 }
4872
4873 for (i = 0; i <= 3; i++) {
4874 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
4875 /* MOV the exponent result (temp channel 1) to the first enabled channel of dst1 */
4876 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4877 alu.op = ALU_OP1_MOV;
4878 alu.src[0].chan = 1;
4879 alu.src[0].sel = ctx->temp_reg;
4880
4881 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
4882 alu.last = 1;
4883 r = r600_bytecode_add_alu(ctx->bc, &alu);
4884 if (r)
4885 return r;
4886 break;
4887 }
4888 }
4889 return 0;
4890 }
4891
4892
4893 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
4894 {
4895 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4896 struct r600_bytecode_alu alu;
4897 int i, c, r;
4898 int write_mask = inst->Dst[0].Register.WriteMask;
4899 int temp_reg = r600_get_temp(ctx);
4900
4901 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
4902 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
4903
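/* A 32-bit int can exceed fp32 precision (24-bit mantissa), so the
 * conversion below splits the value exactly:
 * v = (v & 0xffffff00) + (v & 0xff)
 * Each part converts to float without rounding (both fit in 24 bits of
 * mantissa), is widened with FLT32_TO_FLT64, and the two doubles are
 * recombined with ADD_64. */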
4904 for (c = 0; c < 2; c++) {
4905 int dchan = c * 2;
4906 if (write_mask & (0x3 << dchan)) {
4907 /* split into 24-bit int and 8-bit int */
4908 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4909 alu.op = ALU_OP2_AND_INT;
4910 alu.dst.sel = temp_reg;
4911 alu.dst.chan = dchan;
4912 r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
4913 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4914 alu.src[1].value = 0xffffff00;
4915 alu.dst.write = 1;
4916 r = r600_bytecode_add_alu(ctx->bc, &alu);
4917 if (r)
4918 return r;
4919
4920 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4921 alu.op = ALU_OP2_AND_INT;
4922 alu.dst.sel = temp_reg;
4923 alu.dst.chan = dchan + 1;
4924 r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
4925 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4926 alu.src[1].value = 0xff;
4927 alu.dst.write = 1;
4928 alu.last = 1;
4929 r = r600_bytecode_add_alu(ctx->bc, &alu);
4930 if (r)
4931 return r;
4932 }
4933 }
4934
4935 for (c = 0; c < 2; c++) {
4936 int dchan = c * 2;
4937 if (write_mask & (0x3 << dchan)) {
4938 for (i = dchan; i <= dchan + 1; i++) {
4939 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4940 alu.op = i == dchan ? ctx->inst_info->op : ALU_OP1_UINT_TO_FLT;
4941
4942 alu.src[0].sel = temp_reg;
4943 alu.src[0].chan = i;
4944 alu.dst.sel = temp_reg;
4945 alu.dst.chan = i;
4946 alu.dst.write = 1;
4947 if (ctx->bc->chip_class == CAYMAN)
4948 alu.last = i == dchan + 1;
4949 else
4950 alu.last = 1; /* trans only ops on evergreen */
4951
4952 r = r600_bytecode_add_alu(ctx->bc, &alu);
4953 if (r)
4954 return r;
4955 }
4956 }
4957 }
4958
4959 for (c = 0; c < 2; c++) {
4960 int dchan = c * 2;
4961 if (write_mask & (0x3 << dchan)) {
4962 for (i = 0; i < 4; i++) {
4963 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4964 alu.op = ALU_OP1_FLT32_TO_FLT64;
4965
4966 alu.src[0].chan = dchan + (i / 2);
4967 if (i == 0 || i == 2)
4968 alu.src[0].sel = temp_reg;
4969 else {
4970 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
4971 alu.src[0].value = 0x0;
4972 }
4973 alu.dst.sel = ctx->temp_reg;
4974 alu.dst.chan = i;
4975 alu.last = i == 3;
4976 alu.dst.write = 1;
4977
4978 r = r600_bytecode_add_alu(ctx->bc, &alu);
4979 if (r)
4980 return r;
4981 }
4982
4983 for (i = 0; i <= 1; i++) {
4984 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4985 alu.op = ALU_OP2_ADD_64;
4986
4987 alu.src[0].chan = fp64_switch(i);
4988 alu.src[0].sel = ctx->temp_reg;
4989
4990 alu.src[1].chan = fp64_switch(i + 2);
4991 alu.src[1].sel = ctx->temp_reg;
4992 tgsi_dst(ctx, &inst->Dst[0], dchan + i, &alu.dst);
4993 alu.last = i == 1;
4994
4995 r = r600_bytecode_add_alu(ctx->bc, &alu);
4996 if (r)
4997 return r;
4998 }
4999 }
5000 }
5001
5002 return 0;
5003 }
5004
5005 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
5006 {
5007 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5008 struct r600_bytecode_alu alu;
5009 int i, r;
5010 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5011 int treg = r600_get_temp(ctx);
5012 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
5013 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
5014
5015 /* do a 64->32 conversion into a temp register */
5016 r = tgsi_op2_64_params(ctx, true, false, treg, ALU_OP1_FLT64_TO_FLT32);
5017 if (r)
5018 return r;
5019
5020 for (i = 0; i <= lasti; i++) {
5021 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5022 continue;
5023 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5024 alu.op = ctx->inst_info->op;
5025
5026 alu.src[0].chan = i;
5027 alu.src[0].sel = treg;
5028 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5029 alu.last = (i == lasti);
5030
5031 r = r600_bytecode_add_alu(ctx->bc, &alu);
5032 if (r)
5033 return r;
5034 }
5035
5036 return 0;
5037 }
5038
5039 static int cayman_emit_unary_double_raw(struct r600_bytecode *bc,
5040 unsigned op,
5041 int dst_reg,
5042 struct r600_shader_src *src,
5043 bool abs)
5044 {
5045 struct r600_bytecode_alu alu;
5046 const int last_slot = 3;
5047 int r;
5048
5049 /* these have to write the result to X/Y by the looks of it */
5050 for (int i = 0 ; i < last_slot; i++) {
5051 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5052 alu.op = op;
5053
5054 r600_bytecode_src(&alu.src[0], src, 1);
5055 r600_bytecode_src(&alu.src[1], src, 0);
5056
5057 if (abs)
5058 r600_bytecode_src_set_abs(&alu.src[1]);
5059
5060 alu.dst.sel = dst_reg;
5061 alu.dst.chan = i;
5062 alu.dst.write = (i == 0 || i == 1);
5063
5064 if (bc->chip_class != CAYMAN || i == last_slot - 1)
5065 alu.last = 1;
5066 r = r600_bytecode_add_alu(bc, &alu);
5067 if (r)
5068 return r;
5069 }
5070
5071 return 0;
5072 }
5073
5074 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
5075 {
5076 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5077 int i, r;
5078 struct r600_bytecode_alu alu;
5079 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5080 int t1 = ctx->temp_reg;
5081
5082 /* there should only be one src reg */
5083 assert(inst->Instruction.NumSrcRegs == 1);
5084
5085 /* only support one double at a time */
5086 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5087 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5088
5089 r = cayman_emit_unary_double_raw(
5090 ctx->bc, ctx->inst_info->op, t1,
5091 &ctx->src[0],
5092 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
5093 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT);
5094 if (r)
5095 return r;
5096
5097 for (i = 0 ; i <= lasti; i++) {
5098 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5099 continue;
5100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5101 alu.op = ALU_OP1_MOV;
5102 alu.src[0].sel = t1;
5103 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
5104 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5105 alu.dst.write = 1;
5106 if (i == lasti)
5107 alu.last = 1;
5108 r = r600_bytecode_add_alu(ctx->bc, &alu);
5109 if (r)
5110 return r;
5111 }
5112 return 0;
5113 }
5114
5115 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
5116 {
5117 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5118 int i, j, r;
5119 struct r600_bytecode_alu alu;
5120 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5121
5122 for (i = 0 ; i < last_slot; i++) {
5123 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5124 alu.op = ctx->inst_info->op;
5125 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5126 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
5127
5128 /* RSQ should take the absolute value of src */
5129 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
5130 r600_bytecode_src_set_abs(&alu.src[j]);
5131 }
5132 }
5133 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5134 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5135
5136 if (i == last_slot - 1)
5137 alu.last = 1;
5138 r = r600_bytecode_add_alu(ctx->bc, &alu);
5139 if (r)
5140 return r;
5141 }
5142 return 0;
5143 }
5144
5145 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
5146 {
5147 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5148 int i, j, k, r;
5149 struct r600_bytecode_alu alu;
5150 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5151 int t1 = ctx->temp_reg;
5152
5153 for (k = 0; k <= lasti; k++) {
5154 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
5155 continue;
5156
5157 for (i = 0 ; i < 4; i++) {
5158 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5159 alu.op = ctx->inst_info->op;
5160 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5161 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
5162 }
5163 alu.dst.sel = t1;
5164 alu.dst.chan = i;
5165 alu.dst.write = (i == k);
5166 if (i == 3)
5167 alu.last = 1;
5168 r = r600_bytecode_add_alu(ctx->bc, &alu);
5169 if (r)
5170 return r;
5171 }
5172 }
5173
5174 for (i = 0 ; i <= lasti; i++) {
5175 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5176 continue;
5177 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5178 alu.op = ALU_OP1_MOV;
5179 alu.src[0].sel = t1;
5180 alu.src[0].chan = i;
5181 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5182 alu.dst.write = 1;
5183 if (i == lasti)
5184 alu.last = 1;
5185 r = r600_bytecode_add_alu(ctx->bc, &alu);
5186 if (r)
5187 return r;
5188 }
5189
5190 return 0;
5191 }
5192
5193
5194 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
5195 {
5196 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5197 int i, j, k, r;
5198 struct r600_bytecode_alu alu;
5199 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5200 int t1 = ctx->temp_reg;
5201
5202 /* t1 would get overwritten below if we actually tried to
5203 * multiply two pairs of doubles at a time. */
5204 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5205 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5206
5207 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
5208
5209 for (i = 0; i < 4; i++) {
5210 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5211 alu.op = ctx->inst_info->op;
5212 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5213 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
5214 }
5215 alu.dst.sel = t1;
5216 alu.dst.chan = i;
5217 alu.dst.write = 1;
5218 if (i == 3)
5219 alu.last = 1;
5220 r = r600_bytecode_add_alu(ctx->bc, &alu);
5221 if (r)
5222 return r;
5223 }
5224
5225 for (i = 0; i <= lasti; i++) {
5226 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5227 continue;
5228 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5229 alu.op = ALU_OP1_MOV;
5230 alu.src[0].sel = t1;
5231 alu.src[0].chan = i;
5232 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5233 alu.dst.write = 1;
5234 if (i == lasti)
5235 alu.last = 1;
5236 r = r600_bytecode_add_alu(ctx->bc, &alu);
5237 if (r)
5238 return r;
5239 }
5240
5241 return 0;
5242 }
5243
5244 /*
5245 * Emit RECIP_64 + MUL_64 to implement division.
5246 */
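/* i.e. dst = src0 * (1.0 / src1); there is no hw double divide, so
 * RECIP_64 computes the reciprocal into t1 and MUL_64 multiplies it
 * with the numerator. */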
5247 static int cayman_ddiv_instr(struct r600_shader_ctx *ctx)
5248 {
5249 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5250 int r;
5251 struct r600_bytecode_alu alu;
5252 int t1 = ctx->temp_reg;
5253 int k;
5254
5255 /* Only support one double at a time. This is the same constraint as
5256 * in DMUL lowering. */
5257 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5258 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5259
5260 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
5261
5262 r = cayman_emit_unary_double_raw(ctx->bc, ALU_OP2_RECIP_64, t1, &ctx->src[1], false);
5263 if (r)
5264 return r;
5265
5266 for (int i = 0; i < 4; i++) {
5267 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5268 alu.op = ALU_OP2_MUL_64;
5269
5270 r600_bytecode_src(&alu.src[0], &ctx->src[0], k * 2 + ((i == 3) ? 0 : 1));
5271
5272 alu.src[1].sel = t1;
5273 alu.src[1].chan = (i == 3) ? 0 : 1;
5274
5275 alu.dst.sel = t1;
5276 alu.dst.chan = i;
5277 alu.dst.write = 1;
5278 if (i == 3)
5279 alu.last = 1;
5280 r = r600_bytecode_add_alu(ctx->bc, &alu);
5281 if (r)
5282 return r;
5283 }
5284
5285 for (int i = 0; i < 2; i++) {
5286 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5287 alu.op = ALU_OP1_MOV;
5288 alu.src[0].sel = t1;
5289 alu.src[0].chan = i;
5290 tgsi_dst(ctx, &inst->Dst[0], k * 2 + i, &alu.dst);
5291 alu.dst.write = 1;
5292 if (i == 1)
5293 alu.last = 1;
5294 r = r600_bytecode_add_alu(ctx->bc, &alu);
5295 if (r)
5296 return r;
5297 }
5298 return 0;
5299 }
5300
5301 /*
5302 * r600 - trunc to -PI..PI range
5303 * r700 - normalize by dividing by 2PI
5304 * see fdo bug 27901
5305 */
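/* Both chip paths share the same range reduction, sketched as:
 * t = fract(src * 0.5/PI + 0.5) (the MULADD + FRACT below)
 * r600: result = t * 2*PI - PI (back to radians in -PI..PI)
 * r700+: result = t - 0.5 (hw consumes a normalized angle)
 */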
5306 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
5307 {
5308 int r;
5309 struct r600_bytecode_alu alu;
5310
5311 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5312 alu.op = ALU_OP3_MULADD;
5313 alu.is_op3 = 1;
5314
5315 alu.dst.chan = 0;
5316 alu.dst.sel = ctx->temp_reg;
5317 alu.dst.write = 1;
5318
5319 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5320
5321 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5322 alu.src[1].chan = 0;
5323 alu.src[1].value = u_bitcast_f2u(0.5f * M_1_PI);
5324 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
5325 alu.src[2].chan = 0;
5326 alu.last = 1;
5327 r = r600_bytecode_add_alu(ctx->bc, &alu);
5328 if (r)
5329 return r;
5330
5331 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5332 alu.op = ALU_OP1_FRACT;
5333
5334 alu.dst.chan = 0;
5335 alu.dst.sel = ctx->temp_reg;
5336 alu.dst.write = 1;
5337
5338 alu.src[0].sel = ctx->temp_reg;
5339 alu.src[0].chan = 0;
5340 alu.last = 1;
5341 r = r600_bytecode_add_alu(ctx->bc, &alu);
5342 if (r)
5343 return r;
5344
5345 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5346 alu.op = ALU_OP3_MULADD;
5347 alu.is_op3 = 1;
5348
5349 alu.dst.chan = 0;
5350 alu.dst.sel = ctx->temp_reg;
5351 alu.dst.write = 1;
5352
5353 alu.src[0].sel = ctx->temp_reg;
5354 alu.src[0].chan = 0;
5355
5356 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5357 alu.src[1].chan = 0;
5358 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5359 alu.src[2].chan = 0;
5360
5361 if (ctx->bc->chip_class == R600) {
5362 alu.src[1].value = u_bitcast_f2u(2.0f * M_PI);
5363 alu.src[2].value = u_bitcast_f2u(-M_PI);
5364 } else {
5365 alu.src[1].sel = V_SQ_ALU_SRC_1;
5366 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
5367 alu.src[2].neg = 1;
5368 }
5369
5370 alu.last = 1;
5371 r = r600_bytecode_add_alu(ctx->bc, &alu);
5372 if (r)
5373 return r;
5374 return 0;
5375 }
5376
5377 static int cayman_trig(struct r600_shader_ctx *ctx)
5378 {
5379 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5380 struct r600_bytecode_alu alu;
5381 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5382 int i, r;
5383
5384 r = tgsi_setup_trig(ctx);
5385 if (r)
5386 return r;
5387
5388
5389 for (i = 0; i < last_slot; i++) {
5390 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5391 alu.op = ctx->inst_info->op;
5392 alu.dst.chan = i;
5393
5394 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5395 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5396
5397 alu.src[0].sel = ctx->temp_reg;
5398 alu.src[0].chan = 0;
5399 if (i == last_slot - 1)
5400 alu.last = 1;
5401 r = r600_bytecode_add_alu(ctx->bc, &alu);
5402 if (r)
5403 return r;
5404 }
5405 return 0;
5406 }
5407
5408 static int tgsi_trig(struct r600_shader_ctx *ctx)
5409 {
5410 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5411 struct r600_bytecode_alu alu;
5412 int i, r;
5413 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5414
5415 r = tgsi_setup_trig(ctx);
5416 if (r)
5417 return r;
5418
5419 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5420 alu.op = ctx->inst_info->op;
5421 alu.dst.chan = 0;
5422 alu.dst.sel = ctx->temp_reg;
5423 alu.dst.write = 1;
5424
5425 alu.src[0].sel = ctx->temp_reg;
5426 alu.src[0].chan = 0;
5427 alu.last = 1;
5428 r = r600_bytecode_add_alu(ctx->bc, &alu);
5429 if (r)
5430 return r;
5431
5432 /* replicate result */
5433 for (i = 0; i < lasti + 1; i++) {
5434 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5435 continue;
5436
5437 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5438 alu.op = ALU_OP1_MOV;
5439
5440 alu.src[0].sel = ctx->temp_reg;
5441 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5442 if (i == lasti)
5443 alu.last = 1;
5444 r = r600_bytecode_add_alu(ctx->bc, &alu);
5445 if (r)
5446 return r;
5447 }
5448 return 0;
5449 }
5450
5451 static int tgsi_kill(struct r600_shader_ctx *ctx)
5452 {
5453 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5454 struct r600_bytecode_alu alu;
5455 int i, r;
5456
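/* Unconditional KILL compares constants that always trigger: src0 =
 * 0.0 vs src1 = -1.0 (the inline 1.0 with the neg bit), so a
 * KILLGT-style op (taken from inst_info) discards every pixel that
 * reaches it; conditional kills compare 0.0 against the real source
 * instead. */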
5457 for (i = 0; i < 4; i++) {
5458 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5459 alu.op = ctx->inst_info->op;
5460
5461 alu.dst.chan = i;
5462
5463 alu.src[0].sel = V_SQ_ALU_SRC_0;
5464
5465 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
5466 alu.src[1].sel = V_SQ_ALU_SRC_1;
5467 alu.src[1].neg = 1;
5468 } else {
5469 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5470 }
5471 if (i == 3) {
5472 alu.last = 1;
5473 }
5474 r = r600_bytecode_add_alu(ctx->bc, &alu);
5475 if (r)
5476 return r;
5477 }
5478
5479 /* a kill must be the last instruction of its ALU clause */
5480 ctx->bc->force_add_cf = 1;
5481 ctx->shader->uses_kill = TRUE;
5482 return 0;
5483 }
5484
5485 static int tgsi_lit(struct r600_shader_ctx *ctx)
5486 {
5487 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5488 struct r600_bytecode_alu alu;
5489 int r;
5490
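/* LIT computes the classic lighting coefficients; as an equation:
 * dst = (1.0,
 * max(src.x, 0),
 * src.x > 0 ? pow(max(src.y, 0), src.w) : 0,
 * 1.0)
 * The pow is built as exp2(src.w * log2(max(src.y, 0))) from
 * LOG_CLAMPED, MUL_LIT and EXP_IEEE below; MUL_LIT appears to exist
 * exactly to handle the src.x <= 0 special case of this formula. */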
5491 /* tmp.x = max(src.y, 0.0) */
5492 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5493 alu.op = ALU_OP2_MAX;
5494 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
5495 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
5496 alu.src[1].chan = 1;
5497
5498 alu.dst.sel = ctx->temp_reg;
5499 alu.dst.chan = 0;
5500 alu.dst.write = 1;
5501
5502 alu.last = 1;
5503 r = r600_bytecode_add_alu(ctx->bc, &alu);
5504 if (r)
5505 return r;
5506
5507 if (inst->Dst[0].Register.WriteMask & (1 << 2))
5508 {
5509 int chan;
5510 int sel;
5511 unsigned i;
5512
5513 if (ctx->bc->chip_class == CAYMAN) {
5514 for (i = 0; i < 3; i++) {
5515 /* tmp.z = log(tmp.x) */
5516 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5517 alu.op = ALU_OP1_LOG_CLAMPED;
5518 alu.src[0].sel = ctx->temp_reg;
5519 alu.src[0].chan = 0;
5520 alu.dst.sel = ctx->temp_reg;
5521 alu.dst.chan = i;
5522 if (i == 2) {
5523 alu.dst.write = 1;
5524 alu.last = 1;
5525 } else
5526 alu.dst.write = 0;
5527
5528 r = r600_bytecode_add_alu(ctx->bc, &alu);
5529 if (r)
5530 return r;
5531 }
5532 } else {
5533 /* tmp.z = log(tmp.x) */
5534 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5535 alu.op = ALU_OP1_LOG_CLAMPED;
5536 alu.src[0].sel = ctx->temp_reg;
5537 alu.src[0].chan = 0;
5538 alu.dst.sel = ctx->temp_reg;
5539 alu.dst.chan = 2;
5540 alu.dst.write = 1;
5541 alu.last = 1;
5542 r = r600_bytecode_add_alu(ctx->bc, &alu);
5543 if (r)
5544 return r;
5545 }
5546
5547 chan = alu.dst.chan;
5548 sel = alu.dst.sel;
5549
5550	/* tmp.x = MUL_LIT(tmp.z, src.w, src.x) */
5551 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5552 alu.op = ALU_OP3_MUL_LIT;
5553 alu.src[0].sel = sel;
5554 alu.src[0].chan = chan;
5555 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
5556 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
5557 alu.dst.sel = ctx->temp_reg;
5558 alu.dst.chan = 0;
5559 alu.dst.write = 1;
5560 alu.is_op3 = 1;
5561 alu.last = 1;
5562 r = r600_bytecode_add_alu(ctx->bc, &alu);
5563 if (r)
5564 return r;
5565
5566 if (ctx->bc->chip_class == CAYMAN) {
5567 for (i = 0; i < 3; i++) {
5568 /* dst.z = exp(tmp.x) */
5569 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5570 alu.op = ALU_OP1_EXP_IEEE;
5571 alu.src[0].sel = ctx->temp_reg;
5572 alu.src[0].chan = 0;
5573 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5574 if (i == 2) {
5575 alu.dst.write = 1;
5576 alu.last = 1;
5577 } else
5578 alu.dst.write = 0;
5579 r = r600_bytecode_add_alu(ctx->bc, &alu);
5580 if (r)
5581 return r;
5582 }
5583 } else {
5584 /* dst.z = exp(tmp.x) */
5585 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5586 alu.op = ALU_OP1_EXP_IEEE;
5587 alu.src[0].sel = ctx->temp_reg;
5588 alu.src[0].chan = 0;
5589 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
5590 alu.last = 1;
5591 r = r600_bytecode_add_alu(ctx->bc, &alu);
5592 if (r)
5593 return r;
5594 }
5595 }
5596
5597	/* dst.x <- 1.0 */
5598 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5599 alu.op = ALU_OP1_MOV;
5600 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
5601 alu.src[0].chan = 0;
5602 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5603 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
5604 r = r600_bytecode_add_alu(ctx->bc, &alu);
5605 if (r)
5606 return r;
5607
5608 /* dst.y = max(src.x, 0.0) */
5609 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5610 alu.op = ALU_OP2_MAX;
5611 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5612 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
5613 alu.src[1].chan = 0;
5614 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
5615 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
5616 r = r600_bytecode_add_alu(ctx->bc, &alu);
5617 if (r)
5618 return r;
5619
5620	/* dst.w <- 1.0 */
5621 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5622 alu.op = ALU_OP1_MOV;
5623 alu.src[0].sel = V_SQ_ALU_SRC_1;
5624 alu.src[0].chan = 0;
5625 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
5626 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
5627 alu.last = 1;
5628 r = r600_bytecode_add_alu(ctx->bc, &alu);
5629 if (r)
5630 return r;
5631
5632 return 0;
5633 }
5634
5635 static int tgsi_rsq(struct r600_shader_ctx *ctx)
5636 {
5637 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5638 struct r600_bytecode_alu alu;
5639 int i, r;
5640
5641 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5642
5643 alu.op = ALU_OP1_RECIPSQRT_IEEE;
5644
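/* force the abs modifier on the source: TGSI defines RSQ on the absolute
 * value, so negative inputs must not produce a NaN */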
5645 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5646 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5647 r600_bytecode_src_set_abs(&alu.src[i]);
5648 }
5649 alu.dst.sel = ctx->temp_reg;
5650 alu.dst.write = 1;
5651 alu.last = 1;
5652 r = r600_bytecode_add_alu(ctx->bc, &alu);
5653 if (r)
5654 return r;
5655 /* replicate result */
5656 return tgsi_helper_tempx_replicate(ctx);
5657 }
5658
5659 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
5660 {
5661 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5662 struct r600_bytecode_alu alu;
5663 int i, r;
5664
5665 for (i = 0; i < 4; i++) {
5666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5667 alu.src[0].sel = ctx->temp_reg;
5668 alu.op = ALU_OP1_MOV;
5669 alu.dst.chan = i;
5670 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5671 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5672 if (i == 3)
5673 alu.last = 1;
5674 r = r600_bytecode_add_alu(ctx->bc, &alu);
5675 if (r)
5676 return r;
5677 }
5678 return 0;
5679 }
5680
5681 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
5682 {
5683 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5684 struct r600_bytecode_alu alu;
5685 int i, r;
5686
5687 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5688 alu.op = ctx->inst_info->op;
5689 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5690 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5691 }
5692 alu.dst.sel = ctx->temp_reg;
5693 alu.dst.write = 1;
5694 alu.last = 1;
5695 r = r600_bytecode_add_alu(ctx->bc, &alu);
5696 if (r)
5697 return r;
5698 /* replicate result */
5699 return tgsi_helper_tempx_replicate(ctx);
5700 }
5701
5702 static int cayman_pow(struct r600_shader_ctx *ctx)
5703 {
5704 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5705 int i, r;
5706 struct r600_bytecode_alu alu;
5707 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5708
5709 for (i = 0; i < 3; i++) {
5710 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5711 alu.op = ALU_OP1_LOG_IEEE;
5712 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5713 alu.dst.sel = ctx->temp_reg;
5714 alu.dst.chan = i;
5715 alu.dst.write = 1;
5716 if (i == 2)
5717 alu.last = 1;
5718 r = r600_bytecode_add_alu(ctx->bc, &alu);
5719 if (r)
5720 return r;
5721 }
5722
5723 /* b * LOG2(a) */
5724 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5725 alu.op = ALU_OP2_MUL;
5726 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5727 alu.src[1].sel = ctx->temp_reg;
5728 alu.dst.sel = ctx->temp_reg;
5729 alu.dst.write = 1;
5730 alu.last = 1;
5731 r = r600_bytecode_add_alu(ctx->bc, &alu);
5732 if (r)
5733 return r;
5734
5735 for (i = 0; i < last_slot; i++) {
5736	/* POW(a,b) = EXP2(b * LOG2(a)) */
5737 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5738 alu.op = ALU_OP1_EXP_IEEE;
5739 alu.src[0].sel = ctx->temp_reg;
5740
5741 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5742 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5743 if (i == last_slot - 1)
5744 alu.last = 1;
5745 r = r600_bytecode_add_alu(ctx->bc, &alu);
5746 if (r)
5747 return r;
5748 }
5749 return 0;
5750 }
5751
5752 static int tgsi_pow(struct r600_shader_ctx *ctx)
5753 {
5754 struct r600_bytecode_alu alu;
5755 int r;
5756
5757 /* LOG2(a) */
5758 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5759 alu.op = ALU_OP1_LOG_IEEE;
5760 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5761 alu.dst.sel = ctx->temp_reg;
5762 alu.dst.write = 1;
5763 alu.last = 1;
5764 r = r600_bytecode_add_alu(ctx->bc, &alu);
5765 if (r)
5766 return r;
5767 /* b * LOG2(a) */
5768 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5769 alu.op = ALU_OP2_MUL;
5770 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5771 alu.src[1].sel = ctx->temp_reg;
5772 alu.dst.sel = ctx->temp_reg;
5773 alu.dst.write = 1;
5774 alu.last = 1;
5775 r = r600_bytecode_add_alu(ctx->bc, &alu);
5776 if (r)
5777 return r;
5778	/* POW(a,b) = EXP2(b * LOG2(a)) */
5779 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5780 alu.op = ALU_OP1_EXP_IEEE;
5781 alu.src[0].sel = ctx->temp_reg;
5782 alu.dst.sel = ctx->temp_reg;
5783 alu.dst.write = 1;
5784 alu.last = 1;
5785 r = r600_bytecode_add_alu(ctx->bc, &alu);
5786 if (r)
5787 return r;
5788 return tgsi_helper_tempx_replicate(ctx);
5789 }
5790
5791 static int emit_mul_int_op(struct r600_bytecode *bc,
5792 struct r600_bytecode_alu *alu_src)
5793 {
5794 struct r600_bytecode_alu alu;
5795 int i, r;
5796 alu = *alu_src;
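/* integer multiplies are trans-slot-only ops on pre-Cayman parts; Cayman
 * has no trans slot, so replicate the op across all four vector slots and
 * write only the requested channel */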
5797 if (bc->chip_class == CAYMAN) {
5798 for (i = 0; i < 4; i++) {
5799 alu.dst.chan = i;
5800 alu.dst.write = (i == alu_src->dst.chan);
5801 alu.last = (i == 3);
5802
5803 r = r600_bytecode_add_alu(bc, &alu);
5804 if (r)
5805 return r;
5806 }
5807 } else {
5808 alu.last = 1;
5809 r = r600_bytecode_add_alu(bc, &alu);
5810 if (r)
5811 return r;
5812 }
5813 return 0;
5814 }
5815
5816 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
5817 {
5818 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5819 struct r600_bytecode_alu alu;
5820 int i, r, j;
5821 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5822 int lasti = tgsi_last_instruction(write_mask);
5823 int tmp0 = ctx->temp_reg;
5824 int tmp1 = r600_get_temp(ctx);
5825 int tmp2 = r600_get_temp(ctx);
5826 int tmp3 = r600_get_temp(ctx);
5827 int tmp4 = 0;
5828
5829	/* Use an additional temp if the dst register aliases a src register */
5830 if (inst->Src[0].Register.Index == inst->Dst[0].Register.Index ||
5831 inst->Src[1].Register.Index == inst->Dst[0].Register.Index) {
5832 tmp4 = r600_get_temp(ctx);
5833 }
5834
5835 /* Unsigned path:
5836 *
5837	 * we need to represent src1 as src2*q + r, where q is the quotient and r is the remainder
5838 *
5839 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
5840 * 2. tmp0.z = lo (tmp0.x * src2)
5841 * 3. tmp0.w = -tmp0.z
5842 * 4. tmp0.y = hi (tmp0.x * src2)
5843 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
5844 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
5845 * 7. tmp1.x = tmp0.x - tmp0.w
5846 * 8. tmp1.y = tmp0.x + tmp0.w
5847 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
5848 * 10. tmp0.z = hi(tmp0.x * src1) = q
5849 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
5850 *
5851 * 12. tmp0.w = src1 - tmp0.y = r
5852 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
5853 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
5854 *
5855 * if DIV
5856 *
5857 * 15. tmp1.z = tmp0.z + 1 = q + 1
5858 * 16. tmp1.w = tmp0.z - 1 = q - 1
5859 *
5860 * else MOD
5861 *
5862 * 15. tmp1.z = tmp0.w - src2 = r - src2
5863 * 16. tmp1.w = tmp0.w + src2 = r + src2
5864 *
5865 * endif
5866 *
5867 * 17. tmp1.x = tmp1.x & tmp1.y
5868 *
5869 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
5870 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
5871 *
5872 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
5873 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
5874 *
5875 * Signed path:
5876 *
5877 * Same as unsigned, using abs values of the operands,
5878 * and fixing the sign of the result in the end.
5879 */
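/* Steps 2-6 recover the rounding error e of the hardware 2^32/src2
 * estimate, and steps 7-9 bias the estimate by +/- e, i.e. one fixed-point
 * refinement pass before the quotient is formed in step 10. */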
5880
5881 for (i = 0; i < 4; i++) {
5882 if (!(write_mask & (1<<i)))
5883 continue;
5884
5885 if (signed_op) {
5886
5887 /* tmp2.x = -src0 */
5888 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5889 alu.op = ALU_OP2_SUB_INT;
5890
5891 alu.dst.sel = tmp2;
5892 alu.dst.chan = 0;
5893 alu.dst.write = 1;
5894
5895 alu.src[0].sel = V_SQ_ALU_SRC_0;
5896
5897 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5898
5899 alu.last = 1;
5900 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5901 return r;
5902
5903 /* tmp2.y = -src1 */
5904 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5905 alu.op = ALU_OP2_SUB_INT;
5906
5907 alu.dst.sel = tmp2;
5908 alu.dst.chan = 1;
5909 alu.dst.write = 1;
5910
5911 alu.src[0].sel = V_SQ_ALU_SRC_0;
5912
5913 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5914
5915 alu.last = 1;
5916 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5917 return r;
5918
5919			/* tmp2.z sign bit is set if src0 and src1 signs are different */
5920			/* it will be the sign of the quotient */
5921 if (!mod) {
5922
5923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5924 alu.op = ALU_OP2_XOR_INT;
5925
5926 alu.dst.sel = tmp2;
5927 alu.dst.chan = 2;
5928 alu.dst.write = 1;
5929
5930 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5931 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5932
5933 alu.last = 1;
5934 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5935 return r;
5936 }
5937
5938 /* tmp2.x = |src0| */
5939 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5940 alu.op = ALU_OP3_CNDGE_INT;
5941 alu.is_op3 = 1;
5942
5943 alu.dst.sel = tmp2;
5944 alu.dst.chan = 0;
5945 alu.dst.write = 1;
5946
5947 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5948 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5949 alu.src[2].sel = tmp2;
5950 alu.src[2].chan = 0;
5951
5952 alu.last = 1;
5953 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5954 return r;
5955
5956 /* tmp2.y = |src1| */
5957 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5958 alu.op = ALU_OP3_CNDGE_INT;
5959 alu.is_op3 = 1;
5960
5961 alu.dst.sel = tmp2;
5962 alu.dst.chan = 1;
5963 alu.dst.write = 1;
5964
5965 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5966 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5967 alu.src[2].sel = tmp2;
5968 alu.src[2].chan = 1;
5969
5970 alu.last = 1;
5971 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5972 return r;
5973
5974 }
5975
5976 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
5977 if (ctx->bc->chip_class == CAYMAN) {
5978 /* tmp3.x = u2f(src2) */
5979 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5980 alu.op = ALU_OP1_UINT_TO_FLT;
5981
5982 alu.dst.sel = tmp3;
5983 alu.dst.chan = 0;
5984 alu.dst.write = 1;
5985
5986 if (signed_op) {
5987 alu.src[0].sel = tmp2;
5988 alu.src[0].chan = 1;
5989 } else {
5990 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5991 }
5992
5993 alu.last = 1;
5994 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5995 return r;
5996
5997 /* tmp0.x = recip(tmp3.x) */
5998			for (j = 0; j < 3; j++) {
5999 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6000 alu.op = ALU_OP1_RECIP_IEEE;
6001
6002 alu.dst.sel = tmp0;
6003 alu.dst.chan = j;
6004 alu.dst.write = (j == 0);
6005
6006 alu.src[0].sel = tmp3;
6007 alu.src[0].chan = 0;
6008
6009 if (j == 2)
6010 alu.last = 1;
6011 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6012 return r;
6013 }
6014
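/* tmp3.x = tmp0.x * 2^32 (0x4f800000 = 4294967296.0f), scaling the float
 * reciprocal so the FLT_TO_UINT below yields the 2^32/src2 estimate */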
6015 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6016 alu.op = ALU_OP2_MUL;
6017
6018 alu.src[0].sel = tmp0;
6019 alu.src[0].chan = 0;
6020
6021 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6022 alu.src[1].value = 0x4f800000;
6023
6024 alu.dst.sel = tmp3;
6025 alu.dst.write = 1;
6026 alu.last = 1;
6027 r = r600_bytecode_add_alu(ctx->bc, &alu);
6028 if (r)
6029 return r;
6030
6031 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6032 alu.op = ALU_OP1_FLT_TO_UINT;
6033
6034 alu.dst.sel = tmp0;
6035 alu.dst.chan = 0;
6036 alu.dst.write = 1;
6037
6038 alu.src[0].sel = tmp3;
6039 alu.src[0].chan = 0;
6040
6041 alu.last = 1;
6042 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6043 return r;
6044
6045 } else {
6046 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6047 alu.op = ALU_OP1_RECIP_UINT;
6048
6049 alu.dst.sel = tmp0;
6050 alu.dst.chan = 0;
6051 alu.dst.write = 1;
6052
6053 if (signed_op) {
6054 alu.src[0].sel = tmp2;
6055 alu.src[0].chan = 1;
6056 } else {
6057 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6058 }
6059
6060 alu.last = 1;
6061 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6062 return r;
6063 }
6064
6065 /* 2. tmp0.z = lo (tmp0.x * src2) */
6066 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6067 alu.op = ALU_OP2_MULLO_UINT;
6068
6069 alu.dst.sel = tmp0;
6070 alu.dst.chan = 2;
6071 alu.dst.write = 1;
6072
6073 alu.src[0].sel = tmp0;
6074 alu.src[0].chan = 0;
6075 if (signed_op) {
6076 alu.src[1].sel = tmp2;
6077 alu.src[1].chan = 1;
6078 } else {
6079 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6080 }
6081
6082 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6083 return r;
6084
6085 /* 3. tmp0.w = -tmp0.z */
6086 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6087 alu.op = ALU_OP2_SUB_INT;
6088
6089 alu.dst.sel = tmp0;
6090 alu.dst.chan = 3;
6091 alu.dst.write = 1;
6092
6093 alu.src[0].sel = V_SQ_ALU_SRC_0;
6094 alu.src[1].sel = tmp0;
6095 alu.src[1].chan = 2;
6096
6097 alu.last = 1;
6098 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6099 return r;
6100
6101 /* 4. tmp0.y = hi (tmp0.x * src2) */
6102 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6103 alu.op = ALU_OP2_MULHI_UINT;
6104
6105 alu.dst.sel = tmp0;
6106 alu.dst.chan = 1;
6107 alu.dst.write = 1;
6108
6109 alu.src[0].sel = tmp0;
6110 alu.src[0].chan = 0;
6111
6112 if (signed_op) {
6113 alu.src[1].sel = tmp2;
6114 alu.src[1].chan = 1;
6115 } else {
6116 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6117 }
6118
6119 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6120 return r;
6121
6122		/* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
6123 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6124 alu.op = ALU_OP3_CNDE_INT;
6125 alu.is_op3 = 1;
6126
6127 alu.dst.sel = tmp0;
6128 alu.dst.chan = 2;
6129 alu.dst.write = 1;
6130
6131 alu.src[0].sel = tmp0;
6132 alu.src[0].chan = 1;
6133 alu.src[1].sel = tmp0;
6134 alu.src[1].chan = 3;
6135 alu.src[2].sel = tmp0;
6136 alu.src[2].chan = 2;
6137
6138 alu.last = 1;
6139 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6140 return r;
6141
6142 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
6143 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6144 alu.op = ALU_OP2_MULHI_UINT;
6145
6146 alu.dst.sel = tmp0;
6147 alu.dst.chan = 3;
6148 alu.dst.write = 1;
6149
6150 alu.src[0].sel = tmp0;
6151 alu.src[0].chan = 2;
6152
6153 alu.src[1].sel = tmp0;
6154 alu.src[1].chan = 0;
6155
6156 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6157 return r;
6158
6159 /* 7. tmp1.x = tmp0.x - tmp0.w */
6160 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6161 alu.op = ALU_OP2_SUB_INT;
6162
6163 alu.dst.sel = tmp1;
6164 alu.dst.chan = 0;
6165 alu.dst.write = 1;
6166
6167 alu.src[0].sel = tmp0;
6168 alu.src[0].chan = 0;
6169 alu.src[1].sel = tmp0;
6170 alu.src[1].chan = 3;
6171
6172 alu.last = 1;
6173 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6174 return r;
6175
6176 /* 8. tmp1.y = tmp0.x + tmp0.w */
6177 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6178 alu.op = ALU_OP2_ADD_INT;
6179
6180 alu.dst.sel = tmp1;
6181 alu.dst.chan = 1;
6182 alu.dst.write = 1;
6183
6184 alu.src[0].sel = tmp0;
6185 alu.src[0].chan = 0;
6186 alu.src[1].sel = tmp0;
6187 alu.src[1].chan = 3;
6188
6189 alu.last = 1;
6190 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6191 return r;
6192
6193 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
6194 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6195 alu.op = ALU_OP3_CNDE_INT;
6196 alu.is_op3 = 1;
6197
6198 alu.dst.sel = tmp0;
6199 alu.dst.chan = 0;
6200 alu.dst.write = 1;
6201
6202 alu.src[0].sel = tmp0;
6203 alu.src[0].chan = 1;
6204 alu.src[1].sel = tmp1;
6205 alu.src[1].chan = 1;
6206 alu.src[2].sel = tmp1;
6207 alu.src[2].chan = 0;
6208
6209 alu.last = 1;
6210 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6211 return r;
6212
6213 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
6214 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6215 alu.op = ALU_OP2_MULHI_UINT;
6216
6217 alu.dst.sel = tmp0;
6218 alu.dst.chan = 2;
6219 alu.dst.write = 1;
6220
6221 alu.src[0].sel = tmp0;
6222 alu.src[0].chan = 0;
6223
6224 if (signed_op) {
6225 alu.src[1].sel = tmp2;
6226 alu.src[1].chan = 0;
6227 } else {
6228 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6229 }
6230
6231 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6232 return r;
6233
6234 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
6235 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6236 alu.op = ALU_OP2_MULLO_UINT;
6237
6238 alu.dst.sel = tmp0;
6239 alu.dst.chan = 1;
6240 alu.dst.write = 1;
6241
6242 if (signed_op) {
6243 alu.src[0].sel = tmp2;
6244 alu.src[0].chan = 1;
6245 } else {
6246 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6247 }
6248
6249 alu.src[1].sel = tmp0;
6250 alu.src[1].chan = 2;
6251
6252 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6253 return r;
6254
6255 /* 12. tmp0.w = src1 - tmp0.y = r */
6256 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6257 alu.op = ALU_OP2_SUB_INT;
6258
6259 alu.dst.sel = tmp0;
6260 alu.dst.chan = 3;
6261 alu.dst.write = 1;
6262
6263 if (signed_op) {
6264 alu.src[0].sel = tmp2;
6265 alu.src[0].chan = 0;
6266 } else {
6267 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6268 }
6269
6270 alu.src[1].sel = tmp0;
6271 alu.src[1].chan = 1;
6272
6273 alu.last = 1;
6274 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6275 return r;
6276
6277 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
6278 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6279 alu.op = ALU_OP2_SETGE_UINT;
6280
6281 alu.dst.sel = tmp1;
6282 alu.dst.chan = 0;
6283 alu.dst.write = 1;
6284
6285 alu.src[0].sel = tmp0;
6286 alu.src[0].chan = 3;
6287 if (signed_op) {
6288 alu.src[1].sel = tmp2;
6289 alu.src[1].chan = 1;
6290 } else {
6291 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6292 }
6293
6294 alu.last = 1;
6295 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6296 return r;
6297
6298 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
6299 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6300 alu.op = ALU_OP2_SETGE_UINT;
6301
6302 alu.dst.sel = tmp1;
6303 alu.dst.chan = 1;
6304 alu.dst.write = 1;
6305
6306 if (signed_op) {
6307 alu.src[0].sel = tmp2;
6308 alu.src[0].chan = 0;
6309 } else {
6310 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6311 }
6312
6313 alu.src[1].sel = tmp0;
6314 alu.src[1].chan = 1;
6315
6316 alu.last = 1;
6317 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6318 return r;
6319
6320 if (mod) { /* UMOD */
6321
6322 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
6323 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6324 alu.op = ALU_OP2_SUB_INT;
6325
6326 alu.dst.sel = tmp1;
6327 alu.dst.chan = 2;
6328 alu.dst.write = 1;
6329
6330 alu.src[0].sel = tmp0;
6331 alu.src[0].chan = 3;
6332
6333 if (signed_op) {
6334 alu.src[1].sel = tmp2;
6335 alu.src[1].chan = 1;
6336 } else {
6337 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6338 }
6339
6340 alu.last = 1;
6341 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6342 return r;
6343
6344 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
6345 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6346 alu.op = ALU_OP2_ADD_INT;
6347
6348 alu.dst.sel = tmp1;
6349 alu.dst.chan = 3;
6350 alu.dst.write = 1;
6351
6352 alu.src[0].sel = tmp0;
6353 alu.src[0].chan = 3;
6354 if (signed_op) {
6355 alu.src[1].sel = tmp2;
6356 alu.src[1].chan = 1;
6357 } else {
6358 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6359 }
6360
6361 alu.last = 1;
6362 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6363 return r;
6364
6365 } else { /* UDIV */
6366
6367 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
6368 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6369 alu.op = ALU_OP2_ADD_INT;
6370
6371 alu.dst.sel = tmp1;
6372 alu.dst.chan = 2;
6373 alu.dst.write = 1;
6374
6375 alu.src[0].sel = tmp0;
6376 alu.src[0].chan = 2;
6377 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6378
6379 alu.last = 1;
6380 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6381 return r;
6382
6383 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
6384 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6385 alu.op = ALU_OP2_ADD_INT;
6386
6387 alu.dst.sel = tmp1;
6388 alu.dst.chan = 3;
6389 alu.dst.write = 1;
6390
6391 alu.src[0].sel = tmp0;
6392 alu.src[0].chan = 2;
6393 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
6394
6395 alu.last = 1;
6396 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6397 return r;
6398
6399 }
6400
6401 /* 17. tmp1.x = tmp1.x & tmp1.y */
6402 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6403 alu.op = ALU_OP2_AND_INT;
6404
6405 alu.dst.sel = tmp1;
6406 alu.dst.chan = 0;
6407 alu.dst.write = 1;
6408
6409 alu.src[0].sel = tmp1;
6410 alu.src[0].chan = 0;
6411 alu.src[1].sel = tmp1;
6412 alu.src[1].chan = 1;
6413
6414 alu.last = 1;
6415 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6416 return r;
6417
6418 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
6419 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
6420 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6421 alu.op = ALU_OP3_CNDE_INT;
6422 alu.is_op3 = 1;
6423
6424 alu.dst.sel = tmp0;
6425 alu.dst.chan = 2;
6426 alu.dst.write = 1;
6427
6428 alu.src[0].sel = tmp1;
6429 alu.src[0].chan = 0;
6430 alu.src[1].sel = tmp0;
6431 alu.src[1].chan = mod ? 3 : 2;
6432 alu.src[2].sel = tmp1;
6433 alu.src[2].chan = 2;
6434
6435 alu.last = 1;
6436 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6437 return r;
6438
6439 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
6440 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6441 alu.op = ALU_OP3_CNDE_INT;
6442 alu.is_op3 = 1;
6443
6444 if (signed_op) {
6445 alu.dst.sel = tmp0;
6446 alu.dst.chan = 2;
6447 alu.dst.write = 1;
6448 } else {
6449 if (tmp4 > 0) {
6450 alu.dst.sel = tmp4;
6451 alu.dst.chan = i;
6452 alu.dst.write = 1;
6453 } else {
6454 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6455 }
6456 }
6457
6458 alu.src[0].sel = tmp1;
6459 alu.src[0].chan = 1;
6460 alu.src[1].sel = tmp1;
6461 alu.src[1].chan = 3;
6462 alu.src[2].sel = tmp0;
6463 alu.src[2].chan = 2;
6464
6465 alu.last = 1;
6466 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6467 return r;
6468
6469 if (signed_op) {
6470
6471 /* fix the sign of the result */
6472
6473 if (mod) {
6474
6475 /* tmp0.x = -tmp0.z */
6476 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6477 alu.op = ALU_OP2_SUB_INT;
6478
6479 alu.dst.sel = tmp0;
6480 alu.dst.chan = 0;
6481 alu.dst.write = 1;
6482
6483 alu.src[0].sel = V_SQ_ALU_SRC_0;
6484 alu.src[1].sel = tmp0;
6485 alu.src[1].chan = 2;
6486
6487 alu.last = 1;
6488 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6489 return r;
6490
6491 /* sign of the remainder is the same as the sign of src0 */
6492 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
6493 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6494 alu.op = ALU_OP3_CNDGE_INT;
6495 alu.is_op3 = 1;
6496
6497 if (tmp4 > 0) {
6498 alu.dst.sel = tmp4;
6499 alu.dst.chan = i;
6500 alu.dst.write = 1;
6501 } else {
6502 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6503 }
6504
6505 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6506 alu.src[1].sel = tmp0;
6507 alu.src[1].chan = 2;
6508 alu.src[2].sel = tmp0;
6509 alu.src[2].chan = 0;
6510
6511 alu.last = 1;
6512 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6513 return r;
6514
6515 } else {
6516
6517 /* tmp0.x = -tmp0.z */
6518 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6519 alu.op = ALU_OP2_SUB_INT;
6520
6521 alu.dst.sel = tmp0;
6522 alu.dst.chan = 0;
6523 alu.dst.write = 1;
6524
6525 alu.src[0].sel = V_SQ_ALU_SRC_0;
6526 alu.src[1].sel = tmp0;
6527 alu.src[1].chan = 2;
6528
6529 alu.last = 1;
6530 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6531 return r;
6532
6533 /* fix the quotient sign (same as the sign of src0*src1) */
6534 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
6535 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6536 alu.op = ALU_OP3_CNDGE_INT;
6537 alu.is_op3 = 1;
6538
6539 if (tmp4 > 0) {
6540 alu.dst.sel = tmp4;
6541 alu.dst.chan = i;
6542 alu.dst.write = 1;
6543 } else {
6544 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6545 }
6546
6547 alu.src[0].sel = tmp2;
6548 alu.src[0].chan = 2;
6549 alu.src[1].sel = tmp0;
6550 alu.src[1].chan = 2;
6551 alu.src[2].sel = tmp0;
6552 alu.src[2].chan = 0;
6553
6554 alu.last = 1;
6555 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6556 return r;
6557 }
6558 }
6559 }
6560
6561 if (tmp4 > 0) {
6562 for (i = 0; i <= lasti; ++i) {
6563 if (!(write_mask & (1<<i)))
6564 continue;
6565
6566 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6567 alu.op = ALU_OP1_MOV;
6568 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6569 alu.src[0].sel = tmp4;
6570 alu.src[0].chan = i;
6571
6572 if (i == lasti)
6573 alu.last = 1;
6574 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6575 return r;
6576 }
6577 }
6578
6579 return 0;
6580 }
6581
6582 static int tgsi_udiv(struct r600_shader_ctx *ctx)
6583 {
6584 return tgsi_divmod(ctx, 0, 0);
6585 }
6586
6587 static int tgsi_umod(struct r600_shader_ctx *ctx)
6588 {
6589 return tgsi_divmod(ctx, 1, 0);
6590 }
6591
6592 static int tgsi_idiv(struct r600_shader_ctx *ctx)
6593 {
6594 return tgsi_divmod(ctx, 0, 1);
6595 }
6596
6597 static int tgsi_imod(struct r600_shader_ctx *ctx)
6598 {
6599 return tgsi_divmod(ctx, 1, 1);
6600 }
6601
6602
6603 static int tgsi_f2i(struct r600_shader_ctx *ctx)
6604 {
6605 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6606 struct r600_bytecode_alu alu;
6607 int i, r;
6608 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6609 int last_inst = tgsi_last_instruction(write_mask);
6610
6611 for (i = 0; i < 4; i++) {
6612 if (!(write_mask & (1<<i)))
6613 continue;
6614
6615 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6616 alu.op = ALU_OP1_TRUNC;
6617
6618 alu.dst.sel = ctx->temp_reg;
6619 alu.dst.chan = i;
6620 alu.dst.write = 1;
6621
6622 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6623 if (i == last_inst)
6624 alu.last = 1;
6625 r = r600_bytecode_add_alu(ctx->bc, &alu);
6626 if (r)
6627 return r;
6628 }
6629
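/* second pass: convert the truncated value; FLT_TO_UINT presumably runs
 * in the trans slot, hence it must always close its instruction group */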
6630 for (i = 0; i < 4; i++) {
6631 if (!(write_mask & (1<<i)))
6632 continue;
6633
6634 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6635 alu.op = ctx->inst_info->op;
6636
6637 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6638
6639 alu.src[0].sel = ctx->temp_reg;
6640 alu.src[0].chan = i;
6641
6642 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
6643 alu.last = 1;
6644 r = r600_bytecode_add_alu(ctx->bc, &alu);
6645 if (r)
6646 return r;
6647 }
6648
6649 return 0;
6650 }
6651
6652 static int tgsi_iabs(struct r600_shader_ctx *ctx)
6653 {
6654 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6655 struct r600_bytecode_alu alu;
6656 int i, r;
6657 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6658 int last_inst = tgsi_last_instruction(write_mask);
6659
6660 /* tmp = -src */
6661 for (i = 0; i < 4; i++) {
6662 if (!(write_mask & (1<<i)))
6663 continue;
6664
6665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6666 alu.op = ALU_OP2_SUB_INT;
6667
6668 alu.dst.sel = ctx->temp_reg;
6669 alu.dst.chan = i;
6670 alu.dst.write = 1;
6671
6672 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6673 alu.src[0].sel = V_SQ_ALU_SRC_0;
6674
6675 if (i == last_inst)
6676 alu.last = 1;
6677 r = r600_bytecode_add_alu(ctx->bc, &alu);
6678 if (r)
6679 return r;
6680 }
6681
6682 /* dst = (src >= 0 ? src : tmp) */
6683 for (i = 0; i < 4; i++) {
6684 if (!(write_mask & (1<<i)))
6685 continue;
6686
6687 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6688 alu.op = ALU_OP3_CNDGE_INT;
6689 alu.is_op3 = 1;
6690 alu.dst.write = 1;
6691
6692 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6693
6694 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6695 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6696 alu.src[2].sel = ctx->temp_reg;
6697 alu.src[2].chan = i;
6698
6699 if (i == last_inst)
6700 alu.last = 1;
6701 r = r600_bytecode_add_alu(ctx->bc, &alu);
6702 if (r)
6703 return r;
6704 }
6705 return 0;
6706 }
6707
6708 static int tgsi_issg(struct r600_shader_ctx *ctx)
6709 {
6710 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6711 struct r600_bytecode_alu alu;
6712 int i, r;
6713 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6714 int last_inst = tgsi_last_instruction(write_mask);
6715
6716 /* tmp = (src >= 0 ? src : -1) */
6717 for (i = 0; i < 4; i++) {
6718 if (!(write_mask & (1<<i)))
6719 continue;
6720
6721 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6722 alu.op = ALU_OP3_CNDGE_INT;
6723 alu.is_op3 = 1;
6724
6725 alu.dst.sel = ctx->temp_reg;
6726 alu.dst.chan = i;
6727 alu.dst.write = 1;
6728
6729 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6730 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6731 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
6732
6733 if (i == last_inst)
6734 alu.last = 1;
6735 r = r600_bytecode_add_alu(ctx->bc, &alu);
6736 if (r)
6737 return r;
6738 }
6739
6740 /* dst = (tmp > 0 ? 1 : tmp) */
6741 for (i = 0; i < 4; i++) {
6742 if (!(write_mask & (1<<i)))
6743 continue;
6744
6745 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6746 alu.op = ALU_OP3_CNDGT_INT;
6747 alu.is_op3 = 1;
6748 alu.dst.write = 1;
6749
6750 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6751
6752 alu.src[0].sel = ctx->temp_reg;
6753 alu.src[0].chan = i;
6754
6755 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6756
6757 alu.src[2].sel = ctx->temp_reg;
6758 alu.src[2].chan = i;
6759
6760 if (i == last_inst)
6761 alu.last = 1;
6762 r = r600_bytecode_add_alu(ctx->bc, &alu);
6763 if (r)
6764 return r;
6765 }
6766 return 0;
6767 }
6768
6769
6770
6771 static int tgsi_ssg(struct r600_shader_ctx *ctx)
6772 {
6773 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6774 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6775 int last_inst = tgsi_last_instruction(write_mask);
6776 struct r600_bytecode_alu alu;
6777 int i, r;
6778
6779 /* tmp = (src > 0 ? 1 : src) */
6780 for (i = 0; i <= last_inst; i++) {
6781 if (!(write_mask & (1 << i)))
6782 continue;
6783 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6784 alu.op = ALU_OP3_CNDGT;
6785 alu.is_op3 = 1;
6786
6787 alu.dst.sel = ctx->temp_reg;
6788 alu.dst.chan = i;
6789
6790 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6791 alu.src[1].sel = V_SQ_ALU_SRC_1;
6792 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6793
6794 if (i == last_inst)
6795 alu.last = 1;
6796 r = r600_bytecode_add_alu(ctx->bc, &alu);
6797 if (r)
6798 return r;
6799 }
6800
6801 /* dst = (-tmp > 0 ? -1 : tmp) */
6802 for (i = 0; i <= last_inst; i++) {
6803 if (!(write_mask & (1 << i)))
6804 continue;
6805 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6806 alu.op = ALU_OP3_CNDGT;
6807 alu.is_op3 = 1;
6808 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6809
6810 alu.src[0].sel = ctx->temp_reg;
6811 alu.src[0].chan = i;
6812 alu.src[0].neg = 1;
6813
6814 alu.src[1].sel = V_SQ_ALU_SRC_1;
6815 alu.src[1].neg = 1;
6816
6817 alu.src[2].sel = ctx->temp_reg;
6818 alu.src[2].chan = i;
6819
6820 if (i == last_inst)
6821 alu.last = 1;
6822 r = r600_bytecode_add_alu(ctx->bc, &alu);
6823 if (r)
6824 return r;
6825 }
6826 return 0;
6827 }
6828
6829 static int tgsi_bfi(struct r600_shader_ctx *ctx)
6830 {
6831 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6832 struct r600_bytecode_alu alu;
6833 int i, r, t1, t2;
6834
6835 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6836 int last_inst = tgsi_last_instruction(write_mask);
6837
6838 t1 = r600_get_temp(ctx);
6839
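/* temp_reg flags the width >= 32 case: BFM/BFI only handle widths below
 * 32 bits, so a full-width insert is fixed up at the end by selecting the
 * insert value (src1) directly */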
6840 for (i = 0; i < 4; i++) {
6841 if (!(write_mask & (1<<i)))
6842 continue;
6843
6844 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6845 alu.op = ALU_OP2_SETGE_INT;
6846 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6847 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6848 alu.src[1].value = 32;
6849 alu.dst.sel = ctx->temp_reg;
6850 alu.dst.chan = i;
6851 alu.dst.write = 1;
6852 alu.last = i == last_inst;
6853 r = r600_bytecode_add_alu(ctx->bc, &alu);
6854 if (r)
6855 return r;
6856 }
6857
6858 for (i = 0; i < 4; i++) {
6859 if (!(write_mask & (1<<i)))
6860 continue;
6861
6862		/* create the bitfield mask in t1 */
6863 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6864 alu.op = ALU_OP2_BFM_INT;
6865 alu.dst.sel = t1;
6866 alu.dst.chan = i;
6867 alu.dst.write = 1;
6868 alu.last = i == last_inst;
6869
6870 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6871 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6872
6873 r = r600_bytecode_add_alu(ctx->bc, &alu);
6874 if (r)
6875 return r;
6876 }
6877
6878 t2 = r600_get_temp(ctx);
6879
6880 for (i = 0; i < 4; i++) {
6881 if (!(write_mask & (1<<i)))
6882 continue;
6883
6884 /* shift insert left */
6885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6886 alu.op = ALU_OP2_LSHL_INT;
6887 alu.dst.sel = t2;
6888 alu.dst.chan = i;
6889 alu.dst.write = 1;
6890 alu.last = i == last_inst;
6891
6892 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6893 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6894
6895 r = r600_bytecode_add_alu(ctx->bc, &alu);
6896 if (r)
6897 return r;
6898 }
6899
6900 for (i = 0; i < 4; i++) {
6901 if (!(write_mask & (1<<i)))
6902 continue;
6903
6904 /* actual bitfield insert */
6905 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6906 alu.op = ALU_OP3_BFI_INT;
6907 alu.is_op3 = 1;
6908 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6909 alu.dst.chan = i;
6910 alu.dst.write = 1;
6911 alu.last = i == last_inst;
6912
6913 alu.src[0].sel = t1;
6914 alu.src[0].chan = i;
6915 alu.src[1].sel = t2;
6916 alu.src[1].chan = i;
6917 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6918
6919 r = r600_bytecode_add_alu(ctx->bc, &alu);
6920 if (r)
6921 return r;
6922 }
6923
6924 for (i = 0; i < 4; i++) {
6925 if (!(write_mask & (1<<i)))
6926 continue;
6927 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6928 alu.op = ALU_OP3_CNDE_INT;
6929 alu.is_op3 = 1;
6930 alu.src[0].sel = ctx->temp_reg;
6931 alu.src[0].chan = i;
6932 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6933
6934 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6935
6936 alu.src[1].sel = alu.dst.sel;
6937 alu.src[1].chan = i;
6938
6939 alu.last = i == last_inst;
6940 r = r600_bytecode_add_alu(ctx->bc, &alu);
6941 if (r)
6942 return r;
6943 }
6944 return 0;
6945 }
6946
6947 static int tgsi_msb(struct r600_shader_ctx *ctx)
6948 {
6949 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6950 struct r600_bytecode_alu alu;
6951 int i, r, t1, t2;
6952
6953 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6954 int last_inst = tgsi_last_instruction(write_mask);
6955
6956 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
6957 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
6958
6959 t1 = ctx->temp_reg;
6960
6961	/* bit position is indexed from the LSB by TGSI, and from the MSB by the hardware */
6962 for (i = 0; i < 4; i++) {
6963 if (!(write_mask & (1<<i)))
6964 continue;
6965
6966 /* t1 = FFBH_INT / FFBH_UINT */
6967 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6968 alu.op = ctx->inst_info->op;
6969 alu.dst.sel = t1;
6970 alu.dst.chan = i;
6971 alu.dst.write = 1;
6972 alu.last = i == last_inst;
6973
6974 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6975
6976 r = r600_bytecode_add_alu(ctx->bc, &alu);
6977 if (r)
6978 return r;
6979 }
6980
6981 t2 = r600_get_temp(ctx);
6982
6983 for (i = 0; i < 4; i++) {
6984 if (!(write_mask & (1<<i)))
6985 continue;
6986
6987 /* t2 = 31 - t1 */
6988 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6989 alu.op = ALU_OP2_SUB_INT;
6990 alu.dst.sel = t2;
6991 alu.dst.chan = i;
6992 alu.dst.write = 1;
6993 alu.last = i == last_inst;
6994
6995 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
6996 alu.src[0].value = 31;
6997 alu.src[1].sel = t1;
6998 alu.src[1].chan = i;
6999
7000 r = r600_bytecode_add_alu(ctx->bc, &alu);
7001 if (r)
7002 return r;
7003 }
7004
7005 for (i = 0; i < 4; i++) {
7006 if (!(write_mask & (1<<i)))
7007 continue;
7008
7009		/* result = t1 >= 0 ? t2 : t1 (keep the -1 "no bit found" value as-is) */
7010 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7011 alu.op = ALU_OP3_CNDGE_INT;
7012 alu.is_op3 = 1;
7013 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7014 alu.dst.chan = i;
7015 alu.dst.write = 1;
7016 alu.last = i == last_inst;
7017
7018 alu.src[0].sel = t1;
7019 alu.src[0].chan = i;
7020 alu.src[1].sel = t2;
7021 alu.src[1].chan = i;
7022 alu.src[2].sel = t1;
7023 alu.src[2].chan = i;
7024
7025 r = r600_bytecode_add_alu(ctx->bc, &alu);
7026 if (r)
7027 return r;
7028 }
7029
7030 return 0;
7031 }
7032
7033 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
7034 {
7035 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7036 struct r600_bytecode_alu alu;
7037 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
7038 unsigned location;
7039 const int input = inst->Src[0].Register.Index + ctx->shader->nsys_inputs;
7040
7041 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
7042
7043 /* Interpolators have been marked for use already by allocate_system_value_inputs */
7044 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
7045 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7046 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
7047 }
7048 else {
7049 location = TGSI_INTERPOLATE_LOC_CENTROID;
7050 ctx->shader->input[input].uses_interpolate_at_centroid = 1;
7051 }
7052
7053 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
7054 if (k < 0)
7055 k = 0;
7056 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
7057 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
7058
7059	/* NOTE: currently the offset is not perspective-correct */
7060 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
7061 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7062 int sample_gpr = -1;
7063 int gradientsH, gradientsV;
7064 struct r600_bytecode_tex tex;
7065
7066 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7067 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
7068 }
7069
7070 gradientsH = r600_get_temp(ctx);
7071 gradientsV = r600_get_temp(ctx);
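/* fetch d(i,j)/dx and d(i,j)/dy of the barycentrics, then apply
 * (i,j) += offset.x * d(i,j)/dx + offset.y * d(i,j)/dy with the two
 * MULADD passes below */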
7072 for (i = 0; i < 2; i++) {
7073 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7074 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
7075 tex.src_gpr = interp_gpr;
7076 tex.src_sel_x = interp_base_chan + 0;
7077 tex.src_sel_y = interp_base_chan + 1;
7078 tex.src_sel_z = 0;
7079 tex.src_sel_w = 0;
7080 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
7081 tex.dst_sel_x = 0;
7082 tex.dst_sel_y = 1;
7083 tex.dst_sel_z = 7;
7084 tex.dst_sel_w = 7;
7085			tex.inst_mod = 1; // use per-pixel gradient calculation
7086 tex.sampler_id = 0;
7087 tex.resource_id = tex.sampler_id;
7088 r = r600_bytecode_add_tex(ctx->bc, &tex);
7089 if (r)
7090 return r;
7091 }
7092
7093 for (i = 0; i < 2; i++) {
7094 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7095 alu.op = ALU_OP3_MULADD;
7096 alu.is_op3 = 1;
7097 alu.src[0].sel = gradientsH;
7098 alu.src[0].chan = i;
7099 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7100 alu.src[1].sel = sample_gpr;
7101 alu.src[1].chan = 2;
7102 }
7103 else {
7104 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
7105 }
7106 alu.src[2].sel = interp_gpr;
7107 alu.src[2].chan = interp_base_chan + i;
7108 alu.dst.sel = ctx->temp_reg;
7109 alu.dst.chan = i;
7110 alu.last = i == 1;
7111
7112 r = r600_bytecode_add_alu(ctx->bc, &alu);
7113 if (r)
7114 return r;
7115 }
7116
7117 for (i = 0; i < 2; i++) {
7118 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7119 alu.op = ALU_OP3_MULADD;
7120 alu.is_op3 = 1;
7121 alu.src[0].sel = gradientsV;
7122 alu.src[0].chan = i;
7123 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7124 alu.src[1].sel = sample_gpr;
7125 alu.src[1].chan = 3;
7126 }
7127 else {
7128 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
7129 }
7130 alu.src[2].sel = ctx->temp_reg;
7131 alu.src[2].chan = i;
7132 alu.dst.sel = ctx->temp_reg;
7133 alu.dst.chan = i;
7134 alu.last = i == 1;
7135
7136 r = r600_bytecode_add_alu(ctx->bc, &alu);
7137 if (r)
7138 return r;
7139 }
7140 }
7141
7142 tmp = r600_get_temp(ctx);
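/* evaluate the attribute: INTERP_ZW then INTERP_XY, each slot pair
 * consuming the j and i coordinates in turn; only the four live result
 * channels (i = 2..5) are actually written */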
7143 for (i = 0; i < 8; i++) {
7144 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7145 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
7146
7147 alu.dst.sel = tmp;
7148		if (i > 1 && i < 6) {
7149 alu.dst.write = 1;
7150 }
7151 else {
7152 alu.dst.write = 0;
7153 }
7154 alu.dst.chan = i % 4;
7155
7156 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
7157 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7158 alu.src[0].sel = ctx->temp_reg;
7159 alu.src[0].chan = 1 - (i % 2);
7160 } else {
7161 alu.src[0].sel = interp_gpr;
7162 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
7163 }
7164 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
7165 alu.src[1].chan = 0;
7166
7167 alu.last = i % 4 == 3;
7168 alu.bank_swizzle_force = SQ_ALU_VEC_210;
7169
7170 r = r600_bytecode_add_alu(ctx->bc, &alu);
7171 if (r)
7172 return r;
7173 }
7174
7175 // INTERP can't swizzle dst
7176 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7177 for (i = 0; i <= lasti; i++) {
7178 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7179 continue;
7180
7181 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7182 alu.op = ALU_OP1_MOV;
7183 alu.src[0].sel = tmp;
7184 alu.src[0].chan = ctx->src[0].swizzle[i];
7185 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7186 alu.dst.write = 1;
7187 alu.last = i == lasti;
7188 r = r600_bytecode_add_alu(ctx->bc, &alu);
7189 if (r)
7190 return r;
7191 }
7192
7193 return 0;
7194 }
7195
7196
7197 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
7198 {
7199 struct r600_bytecode_alu alu;
7200 int i, r;
7201
7202 for (i = 0; i < 4; i++) {
7203 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7204 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
7205 alu.op = ALU_OP0_NOP;
7206 alu.dst.chan = i;
7207 } else {
7208 alu.op = ALU_OP1_MOV;
7209 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7210 alu.src[0].sel = ctx->temp_reg;
7211 alu.src[0].chan = i;
7212 }
7213 if (i == 3) {
7214 alu.last = 1;
7215 }
7216 r = r600_bytecode_add_alu(ctx->bc, &alu);
7217 if (r)
7218 return r;
7219 }
7220 return 0;
7221 }
7222
7223 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
7224 unsigned writemask,
7225 struct r600_bytecode_alu_src *bc_src,
7226 const struct r600_shader_src *shader_src)
7227 {
7228 struct r600_bytecode_alu alu;
7229 int i, r;
7230 int lasti = tgsi_last_instruction(writemask);
7231 int temp_reg = 0;
7232
7233 r600_bytecode_src(&bc_src[0], shader_src, 0);
7234 r600_bytecode_src(&bc_src[1], shader_src, 1);
7235 r600_bytecode_src(&bc_src[2], shader_src, 2);
7236 r600_bytecode_src(&bc_src[3], shader_src, 3);
7237
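/* three-source ALU ops cannot encode the abs modifier, so flush an
 * abs-qualified source through a MOV into a temp first */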
7238 if (bc_src->abs) {
7239 temp_reg = r600_get_temp(ctx);
7240
7241 for (i = 0; i < lasti + 1; i++) {
7242 if (!(writemask & (1 << i)))
7243 continue;
7244 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7245 alu.op = ALU_OP1_MOV;
7246 alu.dst.sel = temp_reg;
7247 alu.dst.chan = i;
7248 alu.dst.write = 1;
7249 alu.src[0] = bc_src[i];
7250 if (i == lasti) {
7251 alu.last = 1;
7252 }
7253 r = r600_bytecode_add_alu(ctx->bc, &alu);
7254 if (r)
7255 return r;
7256 memset(&bc_src[i], 0, sizeof(*bc_src));
7257 bc_src[i].sel = temp_reg;
7258 bc_src[i].chan = i;
7259 }
7260 }
7261 return 0;
7262 }
7263
7264 static int tgsi_op3_dst(struct r600_shader_ctx *ctx, int dst)
7265 {
7266 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7267 struct r600_bytecode_alu alu;
7268 struct r600_bytecode_alu_src srcs[4][4];
7269 int i, j, r;
7270 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7271 unsigned op = ctx->inst_info->op;
7272
7273 if (op == ALU_OP3_MULADD_IEEE &&
7274 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
7275 op = ALU_OP3_MULADD;
7276
7277 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7278 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
7279 srcs[j], &ctx->src[j]);
7280 if (r)
7281 return r;
7282 }
7283
7284 for (i = 0; i < lasti + 1; i++) {
7285 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7286 continue;
7287
7288 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7289 alu.op = op;
7290 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7291 alu.src[j] = srcs[j][i];
7292 }
7293
7294 if (dst == -1) {
7295 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7296 } else {
7297 alu.dst.sel = dst;
7298 }
7299 alu.dst.chan = i;
7300 alu.dst.write = 1;
7301 alu.is_op3 = 1;
7302 if (i == lasti) {
7303 alu.last = 1;
7304 }
7305 r = r600_bytecode_add_alu(ctx->bc, &alu);
7306 if (r)
7307 return r;
7308 }
7309 return 0;
7310 }
7311
7312 static int tgsi_op3(struct r600_shader_ctx *ctx)
7313 {
7314 return tgsi_op3_dst(ctx, -1);
7315 }
7316
7317 static int tgsi_dp(struct r600_shader_ctx *ctx)
7318 {
7319 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7320 struct r600_bytecode_alu alu;
7321 int i, j, r;
7322 unsigned op = ctx->inst_info->op;
7323 if (op == ALU_OP2_DOT4_IEEE &&
7324 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
7325 op = ALU_OP2_DOT4;
7326
7327 for (i = 0; i < 4; i++) {
7328 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7329 alu.op = op;
7330 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7331 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
7332 }
7333
7334 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7335 alu.dst.chan = i;
7336 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
7337		/* handle some special cases: DP2/DP3 zero the unused lanes of the dot */
7338 switch (inst->Instruction.Opcode) {
7339 case TGSI_OPCODE_DP2:
7340 if (i > 1) {
7341 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
7342 alu.src[0].chan = alu.src[1].chan = 0;
7343 }
7344 break;
7345 case TGSI_OPCODE_DP3:
7346 if (i > 2) {
7347 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
7348 alu.src[0].chan = alu.src[1].chan = 0;
7349 }
7350 break;
7351 default:
7352 break;
7353 }
7354 if (i == 3) {
7355 alu.last = 1;
7356 }
7357 r = r600_bytecode_add_alu(ctx->bc, &alu);
7358 if (r)
7359 return r;
7360 }
7361 return 0;
7362 }
7363
7364 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
7365 unsigned index)
7366 {
7367 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7368 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
7369 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
7370 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
7371 ctx->src[index].neg || ctx->src[index].abs ||
7372 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == PIPE_SHADER_GEOMETRY);
7373 }
7374
7375 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
7376 unsigned index)
7377 {
7378 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7379 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
7380 }
7381
7382 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
7383 {
7384 struct r600_bytecode_vtx vtx;
7385 struct r600_bytecode_alu alu;
7386 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7387 int src_gpr, r, i;
7388 int id = tgsi_tex_get_src_gpr(ctx, 1);
7389 int sampler_index_mode = inst->Src[1].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7390
7391 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
7392 if (src_requires_loading) {
7393 for (i = 0; i < 4; i++) {
7394 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7395 alu.op = ALU_OP1_MOV;
7396 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7397 alu.dst.sel = ctx->temp_reg;
7398 alu.dst.chan = i;
7399 if (i == 3)
7400 alu.last = 1;
7401 alu.dst.write = 1;
7402 r = r600_bytecode_add_alu(ctx->bc, &alu);
7403 if (r)
7404 return r;
7405 }
7406 src_gpr = ctx->temp_reg;
7407 }
7408
7409 memset(&vtx, 0, sizeof(vtx));
7410 vtx.op = FETCH_OP_VFETCH;
7411 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
7412 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
7413 vtx.src_gpr = src_gpr;
7414 vtx.mega_fetch_count = 16;
7415 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7416 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
7417 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
7418 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
7419 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
7420 vtx.use_const_fields = 1;
7421 vtx.buffer_index_mode = sampler_index_mode;
7422
7423 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
7424 return r;
7425
7426 if (ctx->bc->chip_class >= EVERGREEN)
7427 return 0;
7428
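/* pre-Evergreen must patch the fetched value in ALU: AND each written
 * channel with the format's component mask from the buffer info
 * constants, then OR the default alpha bits into the last channel */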
7429 for (i = 0; i < 4; i++) {
7430 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7431 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7432 continue;
7433
7434 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7435 alu.op = ALU_OP2_AND_INT;
7436
7437 alu.dst.chan = i;
7438 alu.dst.sel = vtx.dst_gpr;
7439 alu.dst.write = 1;
7440
7441 alu.src[0].sel = vtx.dst_gpr;
7442 alu.src[0].chan = i;
7443
7444 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
7445 alu.src[1].sel += (id * 2);
7446 alu.src[1].chan = i % 4;
7447 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7448
7449 if (i == lasti)
7450 alu.last = 1;
7451 r = r600_bytecode_add_alu(ctx->bc, &alu);
7452 if (r)
7453 return r;
7454 }
7455
7456 if (inst->Dst[0].Register.WriteMask & 3) {
7457 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7458 alu.op = ALU_OP2_OR_INT;
7459
7460 alu.dst.chan = 3;
7461 alu.dst.sel = vtx.dst_gpr;
7462 alu.dst.write = 1;
7463
7464 alu.src[0].sel = vtx.dst_gpr;
7465 alu.src[0].chan = 3;
7466
7467 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
7468 alu.src[1].chan = 0;
7469 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7470
7471 alu.last = 1;
7472 r = r600_bytecode_add_alu(ctx->bc, &alu);
7473 if (r)
7474 return r;
7475 }
7476 return 0;
7477 }
7478
7479 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset, int eg_buffer_base)
7480 {
7481 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7482 int r;
7483 int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset;
7484 int sampler_index_mode = inst->Src[reg_idx].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7485
7486 if (ctx->bc->chip_class < EVERGREEN) {
7487 struct r600_bytecode_alu alu;
7488 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7489 alu.op = ALU_OP1_MOV;
7490 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
7491		/* on r600 this value sits in the second channel (chan 1) of the second dword */
7492 alu.src[0].sel += (id * 2) + 1;
7493 alu.src[0].chan = 1;
7494 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7495 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
7496 alu.last = 1;
7497 r = r600_bytecode_add_alu(ctx->bc, &alu);
7498 if (r)
7499 return r;
7500 return 0;
7501 } else {
7502 struct r600_bytecode_vtx vtx;
7503 memset(&vtx, 0, sizeof(vtx));
7504 vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
7505 vtx.buffer_id = id + eg_buffer_base;
7506 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
7507 vtx.src_gpr = 0;
7508 vtx.mega_fetch_count = 16; /* no idea here really... */
7509 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7510 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
7511 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 4 : 7; /* SEL_Y */
7512 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 4 : 7; /* SEL_Z */
7513 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 4 : 7; /* SEL_W */
7514 vtx.data_format = FMT_32_32_32_32;
7515 vtx.buffer_index_mode = sampler_index_mode;
7516
7517 if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
7518 return r;
7519 return 0;
7520 }
7521 }
7522
7523
7524 static int tgsi_tex(struct r600_shader_ctx *ctx)
7525 {
7526 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7527 struct r600_bytecode_tex tex;
7528 struct r600_bytecode_tex grad_offs[3];
7529 struct r600_bytecode_alu alu;
7530 unsigned src_gpr;
7531 int r, i, j, n_grad_offs = 0;
7532 int opcode;
7533 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
7534 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
7535 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
7536 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
7537
7538 bool txf_add_offsets = inst->Texture.NumOffsets &&
7539 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
7540 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
7541
7542	/* Texture fetch instructions can only use GPRs as sources;
7543	 * they also cannot negate a source or take its absolute value */
7544 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
7545 tgsi_tex_src_requires_loading(ctx, 0)) ||
7546 read_compressed_msaa || txf_add_offsets;
7547
7548 boolean src_loaded = FALSE;
7549 unsigned sampler_src_reg = 1;
7550 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
7551 boolean has_txq_cube_array_z = false;
7552 unsigned sampler_index_mode;
7553 int array_index_offset_channel = -1;
7554
7555 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
7556 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7557 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
7558 if (inst->Dst[0].Register.WriteMask & 4) {
7559 ctx->shader->has_txq_cube_array_z_comp = true;
7560 has_txq_cube_array_z = true;
7561 }
7562
7563 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
7564 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7565 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
7566 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
7567 sampler_src_reg = 2;
7568
7569 /* TGSI moves the sampler to src reg 3 for TXD */
7570 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
7571 sampler_src_reg = 3;
7572
7573 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7574
7575 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
7576
7577 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
7578 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
7579 if (ctx->bc->chip_class < EVERGREEN)
7580 ctx->shader->uses_tex_buffers = true;
7581 return r600_do_buffer_txq(ctx, 1, 0, R600_MAX_CONST_BUFFERS);
7582 }
7583 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
7584 if (ctx->bc->chip_class < EVERGREEN)
7585 ctx->shader->uses_tex_buffers = true;
7586 return do_vtx_fetch_inst(ctx, src_requires_loading);
7587 }
7588 }
7589
7590 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
7591 int out_chan;
7592 /* Add perspective divide */
7593 if (ctx->bc->chip_class == CAYMAN) {
7594 out_chan = 2;
7595 for (i = 0; i < 3; i++) {
7596 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7597 alu.op = ALU_OP1_RECIP_IEEE;
7598 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7599
7600 alu.dst.sel = ctx->temp_reg;
7601 alu.dst.chan = i;
7602 if (i == 2)
7603 alu.last = 1;
7604 if (out_chan == i)
7605 alu.dst.write = 1;
7606 r = r600_bytecode_add_alu(ctx->bc, &alu);
7607 if (r)
7608 return r;
7609 }
7610
7611 } else {
7612 out_chan = 3;
7613 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7614 alu.op = ALU_OP1_RECIP_IEEE;
7615 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7616
7617 alu.dst.sel = ctx->temp_reg;
7618 alu.dst.chan = out_chan;
7619 alu.last = 1;
7620 alu.dst.write = 1;
7621 r = r600_bytecode_add_alu(ctx->bc, &alu);
7622 if (r)
7623 return r;
7624 }
7625
7626 for (i = 0; i < 3; i++) {
7627 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7628 alu.op = ALU_OP2_MUL;
7629 alu.src[0].sel = ctx->temp_reg;
7630 alu.src[0].chan = out_chan;
7631 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
7632 alu.dst.sel = ctx->temp_reg;
7633 alu.dst.chan = i;
7634 alu.dst.write = 1;
7635 r = r600_bytecode_add_alu(ctx->bc, &alu);
7636 if (r)
7637 return r;
7638 }
7639 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7640 alu.op = ALU_OP1_MOV;
7641 alu.src[0].sel = V_SQ_ALU_SRC_1;
7642 alu.src[0].chan = 0;
7643 alu.dst.sel = ctx->temp_reg;
7644 alu.dst.chan = 3;
7645 alu.last = 1;
7646 alu.dst.write = 1;
7647 r = r600_bytecode_add_alu(ctx->bc, &alu);
7648 if (r)
7649 return r;
7650 src_loaded = TRUE;
7651 src_gpr = ctx->temp_reg;
7652 }
7653
7654
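/* Cube map lowering: the CUBE ALU op yields per-face s/t coordinates, the
 * major axis and the face id; s and t are then scaled by 1/|major axis|
 * and biased by 1.5 (the MULADDs below) before sampling. */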
7655 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
7656 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7657 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7658 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7659 inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
7660
7661 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
7662 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
7663
7664 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
7665 for (i = 0; i < 4; i++) {
7666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7667 alu.op = ALU_OP2_CUBE;
7668 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
7669 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
7670 alu.dst.sel = ctx->temp_reg;
7671 alu.dst.chan = i;
7672 if (i == 3)
7673 alu.last = 1;
7674 alu.dst.write = 1;
7675 r = r600_bytecode_add_alu(ctx->bc, &alu);
7676 if (r)
7677 return r;
7678 }
7679
7680 /* tmp1.z = RCP_e(|tmp1.z|) */
7681 if (ctx->bc->chip_class == CAYMAN) {
7682 for (i = 0; i < 3; i++) {
7683 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7684 alu.op = ALU_OP1_RECIP_IEEE;
7685 alu.src[0].sel = ctx->temp_reg;
7686 alu.src[0].chan = 2;
7687 alu.src[0].abs = 1;
7688 alu.dst.sel = ctx->temp_reg;
7689 alu.dst.chan = i;
7690 if (i == 2)
7691 alu.dst.write = 1;
7692 if (i == 2)
7693 alu.last = 1;
7694 r = r600_bytecode_add_alu(ctx->bc, &alu);
7695 if (r)
7696 return r;
7697 }
7698 } else {
7699 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7700 alu.op = ALU_OP1_RECIP_IEEE;
7701 alu.src[0].sel = ctx->temp_reg;
7702 alu.src[0].chan = 2;
7703 alu.src[0].abs = 1;
7704 alu.dst.sel = ctx->temp_reg;
7705 alu.dst.chan = 2;
7706 alu.dst.write = 1;
7707 alu.last = 1;
7708 r = r600_bytecode_add_alu(ctx->bc, &alu);
7709 if (r)
7710 return r;
7711 }
7712
7713 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
7714 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
7715 * muladd has no writemask, have to use another temp
7716 */
7717 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7718 alu.op = ALU_OP3_MULADD;
7719 alu.is_op3 = 1;
7720
7721 alu.src[0].sel = ctx->temp_reg;
7722 alu.src[0].chan = 0;
7723 alu.src[1].sel = ctx->temp_reg;
7724 alu.src[1].chan = 2;
7725
7726 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7727 alu.src[2].chan = 0;
7728 alu.src[2].value = u_bitcast_f2u(1.5f);
7729
7730 alu.dst.sel = ctx->temp_reg;
7731 alu.dst.chan = 0;
7732 alu.dst.write = 1;
7733
7734 r = r600_bytecode_add_alu(ctx->bc, &alu);
7735 if (r)
7736 return r;
7737
7738 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7739 alu.op = ALU_OP3_MULADD;
7740 alu.is_op3 = 1;
7741
7742 alu.src[0].sel = ctx->temp_reg;
7743 alu.src[0].chan = 1;
7744 alu.src[1].sel = ctx->temp_reg;
7745 alu.src[1].chan = 2;
7746
7747 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7748 alu.src[2].chan = 0;
7749 alu.src[2].value = u_bitcast_f2u(1.5f);
7750
7751 alu.dst.sel = ctx->temp_reg;
7752 alu.dst.chan = 1;
7753 alu.dst.write = 1;
7754
7755 alu.last = 1;
7756 r = r600_bytecode_add_alu(ctx->bc, &alu);
7757 if (r)
7758 return r;
7759 /* write initial compare value into Z component
7760 - W src 0 for shadow cube
7761 - X src 1 for shadow cube array */
7762 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7763 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7764 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7765 alu.op = ALU_OP1_MOV;
7766 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
7767 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7768 else
7769 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7770 alu.dst.sel = ctx->temp_reg;
7771 alu.dst.chan = 2;
7772 alu.dst.write = 1;
7773 alu.last = 1;
7774 r = r600_bytecode_add_alu(ctx->bc, &alu);
7775 if (r)
7776 return r;
7777 }
7778
7779 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7780 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7781 if (ctx->bc->chip_class >= EVERGREEN) {
7782 int mytmp = r600_get_temp(ctx);
7783 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7784 alu.op = ALU_OP1_MOV;
7785 alu.src[0].sel = ctx->temp_reg;
7786 alu.src[0].chan = 3;
7787 alu.dst.sel = mytmp;
7788 alu.dst.chan = 0;
7789 alu.dst.write = 1;
7790 alu.last = 1;
7791 r = r600_bytecode_add_alu(ctx->bc, &alu);
7792 if (r)
7793 return r;
7794
7795 /* Round the array index to the nearest integer (RNDNE). This
7796 * needs to be done before merging in the face select value, because
7797 * otherwise the fractional part of the array index would interfere
7798 * with the face select value */
7799 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7800 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7801 alu.op = ALU_OP1_RNDNE;
7802 alu.dst.sel = ctx->temp_reg;
7803 alu.dst.chan = 3;
7804 alu.dst.write = 1;
7805 alu.last = 1;
7806 r = r600_bytecode_add_alu(ctx->bc, &alu);
7807 if (r)
7808 return r;
7809
7810 /* Because the array slice index and the cube face index are merged
7811 * into one value we have to make sure the array slice index is >= 0,
7812 * otherwise the face selection will fail */
7813 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7814 alu.op = ALU_OP2_MAX;
7815 alu.src[0].sel = ctx->temp_reg;
7816 alu.src[0].chan = 3;
7817 alu.src[1].sel = V_SQ_ALU_SRC_0;
7818 alu.dst.sel = ctx->temp_reg;
7819 alu.dst.chan = 3;
7820 alu.dst.write = 1;
7821 alu.last = 1;
7822 r = r600_bytecode_add_alu(ctx->bc, &alu);
7823 if (r)
7824 return r;
7825
7826 /* multiply the rounded layer index by 8 and add back the face id that was saved from temp.w */
7827 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7828 alu.op = ALU_OP3_MULADD;
7829 alu.is_op3 = 1;
7830 alu.src[0].sel = ctx->temp_reg;
7831 alu.src[0].chan = 3;
7832 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7833 alu.src[1].chan = 0;
7834 alu.src[1].value = u_bitcast_f2u(8.0f);
7835 alu.src[2].sel = mytmp;
7836 alu.src[2].chan = 0;
7837 alu.dst.sel = ctx->temp_reg;
7838 alu.dst.chan = 3;
7839 alu.dst.write = 1;
7840 alu.last = 1;
7841 r = r600_bytecode_add_alu(ctx->bc, &alu);
7842 if (r)
7843 return r;
7844 } else if (ctx->bc->chip_class < EVERGREEN) {
7845 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7846 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
7847 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7848 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7849 tex.src_gpr = r600_get_temp(ctx);
7850 tex.src_sel_x = 0;
7851 tex.src_sel_y = 0;
7852 tex.src_sel_z = 0;
7853 tex.src_sel_w = 0;
7854 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7855 tex.coord_type_x = 1;
7856 tex.coord_type_y = 1;
7857 tex.coord_type_z = 1;
7858 tex.coord_type_w = 1;
7859 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7860 alu.op = ALU_OP1_MOV;
7861 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7862 alu.dst.sel = tex.src_gpr;
7863 alu.dst.chan = 0;
7864 alu.last = 1;
7865 alu.dst.write = 1;
7866 r = r600_bytecode_add_alu(ctx->bc, &alu);
7867 if (r)
7868 return r;
7869
7870 r = r600_bytecode_add_tex(ctx->bc, &tex);
7871 if (r)
7872 return r;
7873 }
7874
7875 }
7876
7877 /* for cube forms of lod and bias we need to route things */
7878 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
7879 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
7880 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7881 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
7882 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7883 alu.op = ALU_OP1_MOV;
7884 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7885 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
7886 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7887 else
7888 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7889 alu.dst.sel = ctx->temp_reg;
7890 alu.dst.chan = 2;
7891 alu.last = 1;
7892 alu.dst.write = 1;
7893 r = r600_bytecode_add_alu(ctx->bc, &alu);
7894 if (r)
7895 return r;
7896 }
7897
7898 src_loaded = TRUE;
7899 src_gpr = ctx->temp_reg;
7900 }
7901
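/* TXD: stage the coordinate and the two user-supplied gradient vectors in
 * temporaries and queue SET_GRADIENTS_H/V fetches; they are emitted right
 * before the final sample instruction. */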
7902 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
7903 int temp_h = 0, temp_v = 0;
7904 int start_val = 0;
7905
7906 /* if the source was already loaded (e.g. by the CUBE path above), don't reload it */
7907 if (src_loaded == TRUE)
7908 start_val = 1;
7909 else
7910 src_loaded = TRUE;
7911 for (i = start_val; i < 3; i++) {
7912 int treg = r600_get_temp(ctx);
7913
7914 if (i == 0)
7915 src_gpr = treg;
7916 else if (i == 1)
7917 temp_h = treg;
7918 else
7919 temp_v = treg;
7920
7921 for (j = 0; j < 4; j++) {
7922 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7923 alu.op = ALU_OP1_MOV;
7924 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
7925 alu.dst.sel = treg;
7926 alu.dst.chan = j;
7927 if (j == 3)
7928 alu.last = 1;
7929 alu.dst.write = 1;
7930 r = r600_bytecode_add_alu(ctx->bc, &alu);
7931 if (r)
7932 return r;
7933 }
7934 }
7935 for (i = 1; i < 3; i++) {
7936 /* set gradients h/v */
7937 struct r600_bytecode_tex *t = &grad_offs[n_grad_offs++];
7938 memset(t, 0, sizeof(struct r600_bytecode_tex));
7939 t->op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
7940 FETCH_OP_SET_GRADIENTS_V;
7941 t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7942 t->sampler_index_mode = sampler_index_mode;
7943 t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
7944 t->resource_index_mode = sampler_index_mode;
7945
7946 t->src_gpr = (i == 1) ? temp_h : temp_v;
7947 t->src_sel_x = 0;
7948 t->src_sel_y = 1;
7949 t->src_sel_z = 2;
7950 t->src_sel_w = 3;
7951
7952 t->dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
7953 t->dst_sel_x = t->dst_sel_y = t->dst_sel_z = t->dst_sel_w = 7;
7954 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
7955 t->coord_type_x = 1;
7956 t->coord_type_y = 1;
7957 t->coord_type_z = 1;
7958 t->coord_type_w = 1;
7959 }
7960 }
7961 }
7962
7963 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
7964 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
7965 * incorrectly forces nearest filtering if the texture format is integer.
7966 * The only effect it has on Gather4, which always returns 4 texels for
7967 * bilinear filtering, is that the final coordinates are off by 0.5 of
7968 * the texel size.
7969 *
7970 * The workaround is to subtract 0.5 from the unnormalized coordinates,
7971 * or (0.5 / size) from the normalized coordinates.
7972 */
7973 if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
7974 inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
7975 int treg = r600_get_temp(ctx);
7976
7977 /* move the array index and comparison coordinate to temp_reg if needed */
7978 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7979 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7980 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) && !src_loaded) {
7981 int end = inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ? 3 : 2;
7982 for (i = 2; i <= end; i++) {
7983 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7984 alu.op = ALU_OP1_MOV;
7985 alu.dst.sel = ctx->temp_reg;
7986 alu.dst.chan = i;
7987 alu.dst.write = 1;
7988 alu.last = (i == end);
7989 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7990 r = r600_bytecode_add_alu(ctx->bc, &alu);
7991 if (r)
7992 return r;
7993 }
7994 }
7995
7996 if (inst->Texture.Texture == TGSI_TEXTURE_RECT ||
7997 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
7998 for (i = 0; i < 2; i++) {
7999 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8000 alu.op = ALU_OP2_ADD;
8001 alu.dst.sel = ctx->temp_reg;
8002 alu.dst.chan = i;
8003 alu.dst.write = 1;
8004 alu.last = i == 1;
8005 if (src_loaded) {
8006 alu.src[0].sel = ctx->temp_reg;
8007 alu.src[0].chan = i;
8008 } else
8009 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8010 alu.src[1].sel = V_SQ_ALU_SRC_0_5;
8011 alu.src[1].neg = 1;
8012 r = r600_bytecode_add_alu(ctx->bc, &alu);
8013 if (r)
8014 return r;
8015 }
8016 } else {
8017 /* execute a TXQ */
8018 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8019 tex.op = FETCH_OP_GET_TEXTURE_RESINFO;
8020 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8021 tex.sampler_index_mode = sampler_index_mode;
8022 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
8023 tex.resource_index_mode = sampler_index_mode;
8024 tex.dst_gpr = treg;
8025 tex.src_sel_x = 4;
8026 tex.src_sel_y = 4;
8027 tex.src_sel_z = 4;
8028 tex.src_sel_w = 4;
8029 tex.dst_sel_x = 0;
8030 tex.dst_sel_y = 1;
8031 tex.dst_sel_z = 7;
8032 tex.dst_sel_w = 7;
8033 r = r600_bytecode_add_tex(ctx->bc, &tex);
8034 if (r)
8035 return r;
8036
8037 /* coord.xy = -0.5 * (1.0/int_to_flt(size)) + coord.xy */
8038 if (ctx->bc->chip_class == CAYMAN) {
8040 for (i = 0; i < 2; i++) {
8041 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8042 alu.op = ALU_OP1_INT_TO_FLT;
8043 alu.dst.sel = treg;
8044 alu.dst.chan = i;
8045 alu.dst.write = 1;
8046 alu.src[0].sel = treg;
8047 alu.src[0].chan = i;
8048 alu.last = (i == 1) ? 1 : 0;
8049 r = r600_bytecode_add_alu(ctx->bc, &alu);
8050 if (r)
8051 return r;
8052 }
8053 for (j = 0; j < 2; j++) {
8054 for (i = 0; i < 3; i++) {
8055 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8056 alu.op = ALU_OP1_RECIP_IEEE;
8057 alu.src[0].sel = treg;
8058 alu.src[0].chan = j;
8059 alu.dst.sel = treg;
8060 alu.dst.chan = i;
8061 if (i == 2)
8062 alu.last = 1;
8063 if (i == j)
8064 alu.dst.write = 1;
8065 r = r600_bytecode_add_alu(ctx->bc, &alu);
8066 if (r)
8067 return r;
8068 }
8069 }
8070 } else {
8071 for (i = 0; i < 2; i++) {
8072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8073 alu.op = ALU_OP1_INT_TO_FLT;
8074 alu.dst.sel = treg;
8075 alu.dst.chan = i;
8076 alu.dst.write = 1;
8077 alu.src[0].sel = treg;
8078 alu.src[0].chan = i;
8079 alu.last = 1;
8080 r = r600_bytecode_add_alu(ctx->bc, &alu);
8081 if (r)
8082 return r;
8083 }
8084 for (i = 0; i < 2; i++) {
8085 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8086 alu.op = ALU_OP1_RECIP_IEEE;
8087 alu.src[0].sel = treg;
8088 alu.src[0].chan = i;
8089 alu.dst.sel = treg;
8090 alu.dst.chan = i;
8091 alu.last = 1;
8092 alu.dst.write = 1;
8093 r = r600_bytecode_add_alu(ctx->bc, &alu);
8094 if (r)
8095 return r;
8096 }
8097 }
8098 for (i = 0; i < 2; i++) {
8099 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8100 alu.op = ALU_OP3_MULADD;
8101 alu.is_op3 = 1;
8102 alu.dst.sel = ctx->temp_reg;
8103 alu.dst.chan = i;
8104 alu.dst.write = 1;
8105 alu.last = i == 1;
8106 alu.src[0].sel = treg;
8107 alu.src[0].chan = i;
8108 alu.src[1].sel = V_SQ_ALU_SRC_0_5;
8109 alu.src[1].neg = 1;
8110 if (src_loaded) {
8111 alu.src[2].sel = ctx->temp_reg;
8112 alu.src[2].chan = i;
8113 } else
8114 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
8115 r = r600_bytecode_add_alu(ctx->bc, &alu);
8116 if (r)
8117 return r;
8118 }
8119 }
8120 src_loaded = TRUE;
8121 src_gpr = ctx->temp_reg;
8122 }
8123 }
8124
8125 if (src_requires_loading && !src_loaded) {
8126 for (i = 0; i < 4; i++) {
8127 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8128 alu.op = ALU_OP1_MOV;
8129 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8130 alu.dst.sel = ctx->temp_reg;
8131 alu.dst.chan = i;
8132 if (i == 3)
8133 alu.last = 1;
8134 alu.dst.write = 1;
8135 r = r600_bytecode_add_alu(ctx->bc, &alu);
8136 if (r)
8137 return r;
8138 }
8139 src_loaded = TRUE;
8140 src_gpr = ctx->temp_reg;
8141 }
8142
8143 /* get offset values */
8144 if (inst->Texture.NumOffsets) {
8145 assert(inst->Texture.NumOffsets == 1);
8146
8147 /* The texture offset feature doesn't work with the TXF instruction
8148 * and must be emulated by adding the offset to the texture coordinates. */
8149 if (txf_add_offsets) {
8150 const struct tgsi_texture_offset *off = inst->TexOffsets;
8151
8152 switch (inst->Texture.Texture) {
8153 case TGSI_TEXTURE_3D:
8154 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8155 alu.op = ALU_OP2_ADD_INT;
8156 alu.src[0].sel = src_gpr;
8157 alu.src[0].chan = 2;
8158 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8159 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
8160 alu.dst.sel = src_gpr;
8161 alu.dst.chan = 2;
8162 alu.dst.write = 1;
8163 alu.last = 1;
8164 r = r600_bytecode_add_alu(ctx->bc, &alu);
8165 if (r)
8166 return r;
8167 /* fallthrough */
8168
8169 case TGSI_TEXTURE_2D:
8170 case TGSI_TEXTURE_SHADOW2D:
8171 case TGSI_TEXTURE_RECT:
8172 case TGSI_TEXTURE_SHADOWRECT:
8173 case TGSI_TEXTURE_2D_ARRAY:
8174 case TGSI_TEXTURE_SHADOW2D_ARRAY:
8175 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8176 alu.op = ALU_OP2_ADD_INT;
8177 alu.src[0].sel = src_gpr;
8178 alu.src[0].chan = 1;
8179 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8180 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
8181 alu.dst.sel = src_gpr;
8182 alu.dst.chan = 1;
8183 alu.dst.write = 1;
8184 alu.last = 1;
8185 r = r600_bytecode_add_alu(ctx->bc, &alu);
8186 if (r)
8187 return r;
8188 /* fallthrough */
8189
8190 case TGSI_TEXTURE_1D:
8191 case TGSI_TEXTURE_SHADOW1D:
8192 case TGSI_TEXTURE_1D_ARRAY:
8193 case TGSI_TEXTURE_SHADOW1D_ARRAY:
8194 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8195 alu.op = ALU_OP2_ADD_INT;
8196 alu.src[0].sel = src_gpr;
8197 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8198 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
8199 alu.dst.sel = src_gpr;
8200 alu.dst.write = 1;
8201 alu.last = 1;
8202 r = r600_bytecode_add_alu(ctx->bc, &alu);
8203 if (r)
8204 return r;
8205 break;
8206 /* texture offsets do not apply to other texture targets */
8207 }
8208 } else {
8209 switch (inst->Texture.Texture) {
8210 case TGSI_TEXTURE_3D:
8211 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
8212 /* fallthrough */
8213 case TGSI_TEXTURE_2D:
8214 case TGSI_TEXTURE_SHADOW2D:
8215 case TGSI_TEXTURE_RECT:
8216 case TGSI_TEXTURE_SHADOWRECT:
8217 case TGSI_TEXTURE_2D_ARRAY:
8218 case TGSI_TEXTURE_SHADOW2D_ARRAY:
8219 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
8220 /* fallthrough */
8221 case TGSI_TEXTURE_1D:
8222 case TGSI_TEXTURE_SHADOW1D:
8223 case TGSI_TEXTURE_1D_ARRAY:
8224 case TGSI_TEXTURE_SHADOW1D_ARRAY:
8225 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
8226 }
8227 }
8228 }
8229
8230 /* Obtain the sample index for reading a compressed MSAA color texture.
8231 * To read the FMASK, we use the ldfptr instruction, which tells us
8232 * where the samples are stored.
8233 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
8234 * which is the identity mapping. Each nibble says which physical sample
8235 * should be fetched to get that sample.
8236 *
8237 * Assume src.z contains the sample index. It should be modified like this:
8238 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
8239 * Then fetch the texel with src.
8240 */
8241 if (read_compressed_msaa) {
8242 unsigned sample_chan = 3;
8243 unsigned temp = r600_get_temp(ctx);
8244 assert(src_loaded);
8245
8246 /* temp.w = ldfptr() */
8247 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8248 tex.op = FETCH_OP_LD;
8249 tex.inst_mod = 1; /* to indicate this is ldfptr */
8250 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8251 tex.sampler_index_mode = sampler_index_mode;
8252 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
8253 tex.resource_index_mode = sampler_index_mode;
8254 tex.src_gpr = src_gpr;
8255 tex.dst_gpr = temp;
8256 tex.dst_sel_x = 7; /* mask out these components */
8257 tex.dst_sel_y = 7;
8258 tex.dst_sel_z = 7;
8259 tex.dst_sel_w = 0; /* store X */
8260 tex.src_sel_x = 0;
8261 tex.src_sel_y = 1;
8262 tex.src_sel_z = 2;
8263 tex.src_sel_w = 3;
8264 tex.offset_x = offset_x;
8265 tex.offset_y = offset_y;
8266 tex.offset_z = offset_z;
8267 r = r600_bytecode_add_tex(ctx->bc, &tex);
8268 if (r)
8269 return r;
8270
8271 /* temp.x = sample_index*4 */
8272 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8273 alu.op = ALU_OP2_MULLO_INT;
8274 alu.src[0].sel = src_gpr;
8275 alu.src[0].chan = sample_chan;
8276 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8277 alu.src[1].value = 4;
8278 alu.dst.sel = temp;
8279 alu.dst.chan = 0;
8280 alu.dst.write = 1;
8281 r = emit_mul_int_op(ctx->bc, &alu);
8282 if (r)
8283 return r;
8284
8285 /* sample_index = temp.w >> temp.x */
8286 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8287 alu.op = ALU_OP2_LSHR_INT;
8288 alu.src[0].sel = temp;
8289 alu.src[0].chan = 3;
8290 alu.src[1].sel = temp;
8291 alu.src[1].chan = 0;
8292 alu.dst.sel = src_gpr;
8293 alu.dst.chan = sample_chan;
8294 alu.dst.write = 1;
8295 alu.last = 1;
8296 r = r600_bytecode_add_alu(ctx->bc, &alu);
8297 if (r)
8298 return r;
8299
8300 /* sample_index & 0xF */
8301 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8302 alu.op = ALU_OP2_AND_INT;
8303 alu.src[0].sel = src_gpr;
8304 alu.src[0].chan = sample_chan;
8305 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8306 alu.src[1].value = 0xF;
8307 alu.dst.sel = src_gpr;
8308 alu.dst.chan = sample_chan;
8309 alu.dst.write = 1;
8310 alu.last = 1;
8311 r = r600_bytecode_add_alu(ctx->bc, &alu);
8312 if (r)
8313 return r;
8314 #if 0
8315 /* visualize the FMASK */
8316 for (i = 0; i < 4; i++) {
8317 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8318 alu.op = ALU_OP1_INT_TO_FLT;
8319 alu.src[0].sel = src_gpr;
8320 alu.src[0].chan = sample_chan;
8321 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8322 alu.dst.chan = i;
8323 alu.dst.write = 1;
8324 alu.last = 1;
8325 r = r600_bytecode_add_alu(ctx->bc, &alu);
8326 if (r)
8327 return r;
8328 }
8329 return 0;
8330 #endif
8331 }
8332
8333 /* does this shader want a num layers from TXQ for a cube array? */
8334 if (has_txq_cube_array_z) {
8335 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8336
8337 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8338 alu.op = ALU_OP1_MOV;
8339
8340 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
8341 if (ctx->bc->chip_class >= EVERGREEN) {
8342 /* on evergreen each dword holds the number of cubes */
8343 alu.src[0].sel += id / 4;
8344 alu.src[0].chan = id % 4;
8345 } else {
8346 /* on r600 it is at channel 2 of the second dword */
8347 alu.src[0].sel += (id * 2) + 1;
8348 alu.src[0].chan = 2;
8349 }
8350 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
8351 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
8352 alu.last = 1;
8353 r = r600_bytecode_add_alu(ctx->bc, &alu);
8354 if (r)
8355 return r;
8356 /* disable writemask from texture instruction */
8357 inst->Dst[0].Register.WriteMask &= ~4;
8358 }
8359
8360 opcode = ctx->inst_info->op;
8361 if (opcode == FETCH_OP_GATHER4 &&
8362 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
8363 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
8364 struct r600_bytecode_tex *t;
8365 opcode = FETCH_OP_GATHER4_O;
8366
8367 /* GATHER4_O/GATHER4_C_O use the offset values loaded by the
8368 SET_TEXTURE_OFFSETS instruction; the immediate offset values
8369 encoded in the instruction are ignored. */
8370 t = &grad_offs[n_grad_offs++];
8371 memset(t, 0, sizeof(struct r600_bytecode_tex));
8372 t->op = FETCH_OP_SET_TEXTURE_OFFSETS;
8373 t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8374 t->sampler_index_mode = sampler_index_mode;
8375 t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
8376 t->resource_index_mode = sampler_index_mode;
8377
8378 t->src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
8379 t->src_sel_x = inst->TexOffsets[0].SwizzleX;
8380 t->src_sel_y = inst->TexOffsets[0].SwizzleY;
8381 if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8382 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)
8383 /* make sure the array index selector reads constant zero; this is just
8384 * a safety precaution because TGSI seems to emit something strange here */
8385 t->src_sel_z = 4;
8386 else
8387 t->src_sel_z = inst->TexOffsets[0].SwizzleZ;
8388
8389 t->src_sel_w = 4;
8390
8391 t->dst_sel_x = 7;
8392 t->dst_sel_y = 7;
8393 t->dst_sel_z = 7;
8394 t->dst_sel_w = 7;
8395 }
8396
8397 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
8398 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
8399 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
8400 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
8401 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
8402 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
8403 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
8404 switch (opcode) {
8405 case FETCH_OP_SAMPLE:
8406 opcode = FETCH_OP_SAMPLE_C;
8407 break;
8408 case FETCH_OP_SAMPLE_L:
8409 opcode = FETCH_OP_SAMPLE_C_L;
8410 break;
8411 case FETCH_OP_SAMPLE_LB:
8412 opcode = FETCH_OP_SAMPLE_C_LB;
8413 break;
8414 case FETCH_OP_SAMPLE_G:
8415 opcode = FETCH_OP_SAMPLE_C_G;
8416 break;
8417 /* Texture gather variants */
8418 case FETCH_OP_GATHER4:
8419 opcode = FETCH_OP_GATHER4_C;
8420 break;
8421 case FETCH_OP_GATHER4_O:
8422 opcode = FETCH_OP_GATHER4_C_O;
8423 break;
8424 }
8425 }
8426
8427 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8428 tex.op = opcode;
8429
8430 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8431 tex.sampler_index_mode = sampler_index_mode;
8432 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
8433 tex.resource_index_mode = sampler_index_mode;
8434 tex.src_gpr = src_gpr;
8435 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8436
8437 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
8438 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
8439 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
8440 }
8441
8442 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
8443 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
8444 tex.inst_mod = texture_component_select;
8445
8446 if (ctx->bc->chip_class == CAYMAN) {
8447 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8448 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8449 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8450 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8451 } else {
8452 /* GATHER4 result order is different from TGSI TG4 */
8453 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 1 : 7;
8454 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 2 : 7;
8455 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 0 : 7;
8456 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8457 }
8458 }
8459 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
8460 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8461 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8462 tex.dst_sel_z = 7;
8463 tex.dst_sel_w = 7;
8464 }
8465 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
8466 tex.dst_sel_x = 3;
8467 tex.dst_sel_y = 7;
8468 tex.dst_sel_z = 7;
8469 tex.dst_sel_w = 7;
8470 }
8471 else {
8472 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8473 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8474 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8475 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8476 }
8477
8478
8479 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
8480 tex.src_sel_x = 4;
8481 tex.src_sel_y = 4;
8482 tex.src_sel_z = 4;
8483 tex.src_sel_w = 4;
8484 } else if (src_loaded) {
8485 tex.src_sel_x = 0;
8486 tex.src_sel_y = 1;
8487 tex.src_sel_z = 2;
8488 tex.src_sel_w = 3;
8489 } else {
8490 tex.src_sel_x = ctx->src[0].swizzle[0];
8491 tex.src_sel_y = ctx->src[0].swizzle[1];
8492 tex.src_sel_z = ctx->src[0].swizzle[2];
8493 tex.src_sel_w = ctx->src[0].swizzle[3];
8494 tex.src_rel = ctx->src[0].rel;
8495 }
8496
8497 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
8498 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
8499 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
8500 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
8501 tex.src_sel_x = 1;
8502 tex.src_sel_y = 0;
8503 tex.src_sel_z = 3;
8504 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
8505 }
8506
8507 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
8508 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
8509 tex.coord_type_x = 1;
8510 tex.coord_type_y = 1;
8511 }
8512 tex.coord_type_z = 1;
8513 tex.coord_type_w = 1;
8514
8515 tex.offset_x = offset_x;
8516 tex.offset_y = offset_y;
8517 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
8518 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8519 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
8520 tex.offset_z = 0;
8521 }
8522 else {
8523 tex.offset_z = offset_z;
8524 }
8525
8526 /* Put the depth for comparison in W.
8527 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
8528 * Some instructions expect the depth in Z. */
8529 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
8530 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
8531 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
8532 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
8533 opcode != FETCH_OP_SAMPLE_C_L &&
8534 opcode != FETCH_OP_SAMPLE_C_LB) {
8535 tex.src_sel_w = tex.src_sel_z;
8536 }
8537
8538 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
8539 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
8540 if (opcode == FETCH_OP_SAMPLE_C_L ||
8541 opcode == FETCH_OP_SAMPLE_C_LB) {
8542 /* the array index is read from Y */
8543 tex.coord_type_y = 0;
8544 array_index_offset_channel = tex.src_sel_y;
8545 } else {
8546 /* the array index is read from Z */
8547 tex.coord_type_z = 0;
8548 tex.src_sel_z = tex.src_sel_y;
8549 array_index_offset_channel = tex.src_sel_z;
8550 }
8551 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8552 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) {
8553 tex.coord_type_z = 0;
8554 array_index_offset_channel = tex.src_sel_z;
8555 } else if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
8556 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
8557 (ctx->bc->chip_class >= EVERGREEN))
8558 /* the array index is read from Z; the coordinate is corrected elsewhere */
8559 tex.coord_type_z = 0;
8560
8561 /* For array access to a 1D or 2D ARRAY target the coordinates are floats,
8562 * so round the array index to the nearest integer */
8563 if (array_index_offset_channel >= 0 &&
8564 opcode != FETCH_OP_LD &&
8565 opcode != FETCH_OP_GET_TEXTURE_RESINFO) {
8566 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8567 alu.src[0].sel = tex.src_gpr;
8568 alu.src[0].chan = array_index_offset_channel;
8569 alu.src[0].rel = tex.src_rel;
8570 alu.op = ALU_OP1_RNDNE;
8571 alu.dst.sel = tex.src_gpr;
8572 alu.dst.chan = array_index_offset_channel;
8573 alu.dst.rel = tex.src_rel;
8574 alu.dst.write = 1;
8575 alu.last = 1;
8576 r = r600_bytecode_add_alu(ctx->bc, &alu);
8577 if (r)
8578 return r;
8579 }
8580
8581 /* mask unused source components */
8582 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
8583 switch (inst->Texture.Texture) {
8584 case TGSI_TEXTURE_2D:
8585 case TGSI_TEXTURE_RECT:
8586 tex.src_sel_z = 7;
8587 tex.src_sel_w = 7;
8588 break;
8589 case TGSI_TEXTURE_1D_ARRAY:
8590 tex.src_sel_y = 7;
8591 tex.src_sel_w = 7;
8592 break;
8593 case TGSI_TEXTURE_1D:
8594 tex.src_sel_y = 7;
8595 tex.src_sel_z = 7;
8596 tex.src_sel_w = 7;
8597 break;
8598 }
8599 }
8600
8601 /* Emit set gradient and offset instructions. */
8602 for (i = 0; i < n_grad_offs; ++i) {
8603 r = r600_bytecode_add_tex(ctx->bc, &grad_offs[i]);
8604 if (r)
8605 return r;
8606 }
8607
8608 r = r600_bytecode_add_tex(ctx->bc, &tex);
8609 if (r)
8610 return r;
8611
8612 /* add shadow ambient support - gallium doesn't do it yet */
8613 return 0;
8614 }
8615
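/* Map a TGSI atomic-counter register onto its hardware counter index,
 * for both direct indices and indirect (ArrayID-based) access. */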
8616 static int find_hw_atomic_counter(struct r600_shader_ctx *ctx,
8617 struct tgsi_full_src_register *src)
8618 {
8619 unsigned i;
8620
8621 if (src->Register.Indirect) {
8622 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
8623 if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id)
8624 return ctx->shader->atomics[i].hw_idx;
8625 }
8626 } else {
8627 uint32_t index = src->Register.Index;
8628 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
8629 if (ctx->shader->atomics[i].buffer_id != (unsigned)src->Dimension.Index)
8630 continue;
8631 if (index > ctx->shader->atomics[i].end)
8632 continue;
8633 if (index < ctx->shader->atomics[i].start)
8634 continue;
8635 uint32_t offset = (index - ctx->shader->atomics[i].start);
8636 return ctx->shader->atomics[i].hw_idx + offset;
8637 }
8638 }
8639 assert(0);
8640 return -1;
8641 }
8642
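/* Work out how the GDS instruction addresses an atomic counter: on Cayman
 * the counter offset (uav_id * 4, plus any shifted indirect index) is
 * staged in temp_reg, while evergreen uses the instruction's uav_id and
 * index-mode fields directly. */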
8643 static int tgsi_set_gds_temp(struct r600_shader_ctx *ctx,
8644 int *uav_id_p, int *uav_index_mode_p)
8645 {
8646 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8647 int uav_id, uav_index_mode = 0;
8648 int r;
8649 bool is_cm = (ctx->bc->chip_class == CAYMAN);
8650
8651 uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]);
8652
8653 if (inst->Src[0].Register.Indirect) {
8654 if (is_cm) {
8655 struct r600_bytecode_alu alu;
8656 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8657 alu.op = ALU_OP2_LSHL_INT;
8658 alu.src[0].sel = get_address_file_reg(ctx, inst->Src[0].Indirect.Index);
8659 alu.src[0].chan = 0;
8660 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8661 alu.src[1].value = 2;
8662 alu.dst.sel = ctx->temp_reg;
8663 alu.dst.chan = 0;
8664 alu.dst.write = 1;
8665 alu.last = 1;
8666 r = r600_bytecode_add_alu(ctx->bc, &alu);
8667 if (r)
8668 return r;
8669
8670 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
8671 ctx->temp_reg, 0,
8672 ctx->temp_reg, 0,
8673 V_SQ_ALU_SRC_LITERAL, uav_id * 4);
8674 if (r)
8675 return r;
8676 } else
8677 uav_index_mode = 2;
8678 } else if (is_cm) {
8679 r = single_alu_op2(ctx, ALU_OP1_MOV,
8680 ctx->temp_reg, 0,
8681 V_SQ_ALU_SRC_LITERAL, uav_id * 4,
8682 0, 0);
8683 if (r)
8684 return r;
8685 }
8686 *uav_id_p = uav_id;
8687 *uav_index_mode_p = uav_index_mode;
8688 return 0;
8689 }
8690
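/* Read an atomic counter with GDS_READ_RET; the value lands in the X
 * channel of the destination. */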
8691 static int tgsi_load_gds(struct r600_shader_ctx *ctx)
8692 {
8693 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8694 int r;
8695 struct r600_bytecode_gds gds;
8696 int uav_id = 0;
8697 int uav_index_mode = 0;
8698 bool is_cm = (ctx->bc->chip_class == CAYMAN);
8699
8700 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
8701 if (r)
8702 return r;
8703
8704 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
8705 gds.op = FETCH_OP_GDS_READ_RET;
8706 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8707 gds.uav_id = is_cm ? 0 : uav_id;
8708 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
8709 gds.src_gpr = ctx->temp_reg;
8710 gds.src_sel_x = (is_cm) ? 0 : 4;
8711 gds.src_sel_y = 4;
8712 gds.src_sel_z = 4;
8713 gds.dst_sel_x = 0;
8714 gds.dst_sel_y = 7;
8715 gds.dst_sel_z = 7;
8716 gds.dst_sel_w = 7;
8717 gds.src_gpr2 = 0;
8718 gds.alloc_consume = !is_cm;
8719 r = r600_bytecode_add_gds(ctx->bc, &gds);
8720 if (r)
8721 return r;
8722
8723 ctx->bc->cf_last->vpm = 1;
8724 return 0;
8725 }
8726
8727 /* load the coordinate into a temp, zero-filling unused components; this also fixes up 1D array coordinates (the array index moves to Z) */
8728 static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr)
8729 {
8730 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8731 int r, i;
8732 struct r600_bytecode_alu alu;
8733 int temp_reg = r600_get_temp(ctx);
8734
8735 for (i = 0; i < 4; i++) {
8736 bool def_val = true, write_zero = false;
8737 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8738 alu.op = ALU_OP1_MOV;
8739 alu.dst.sel = temp_reg;
8740 alu.dst.chan = i;
8741
8742 switch (inst->Memory.Texture) {
8743 case TGSI_TEXTURE_BUFFER:
8744 case TGSI_TEXTURE_1D:
8745 if (i == 1 || i == 2 || i == 3) {
8746 write_zero = true;
8747 }
8748 break;
8749 case TGSI_TEXTURE_1D_ARRAY:
8750 if (i == 1 || i == 3)
8751 write_zero = true;
8752 else if (i == 2) {
8753 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1);
8754 def_val = false;
8755 }
8756 break;
8757 case TGSI_TEXTURE_2D:
8758 if (i == 2 || i == 3)
8759 write_zero = true;
8760 break;
8761 default:
8762 if (i == 3)
8763 write_zero = true;
8764 break;
8765 }
8766
8767 if (write_zero) {
8768 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
8769 alu.src[0].value = 0;
8770 } else if (def_val) {
8771 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i);
8772 }
8773
8774 if (i == 3)
8775 alu.last = 1;
8776 alu.dst.write = 1;
8777 r = r600_bytecode_add_alu(ctx->bc, &alu);
8778 if (r)
8779 return r;
8780 }
8781 *idx_gpr = temp_reg;
8782 return 0;
8783 }
8784
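/* Convert a buffer byte offset into the dword index the fetch path
 * expects: immediates are shifted at compile time, anything else gets an
 * LSHR by 2. */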
8785 static int load_buffer_coord(struct r600_shader_ctx *ctx, int src_idx,
8786 int temp_reg)
8787 {
8788 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8789 int r;
8790 if (inst->Src[src_idx].Register.File == TGSI_FILE_IMMEDIATE) {
8791 int value = (ctx->literals[4 * inst->Src[src_idx].Register.Index + inst->Src[src_idx].Register.SwizzleX]);
8792 r = single_alu_op2(ctx, ALU_OP1_MOV,
8793 temp_reg, 0,
8794 V_SQ_ALU_SRC_LITERAL, value >> 2,
8795 0, 0);
8796 if (r)
8797 return r;
8798 } else {
8799 struct r600_bytecode_alu alu;
8800 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8801 alu.op = ALU_OP2_LSHR_INT;
8802 r600_bytecode_src(&alu.src[0], &ctx->src[src_idx], 0);
8803 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8804 alu.src[1].value = 2;
8805 alu.dst.sel = temp_reg;
8806 alu.dst.write = 1;
8807 alu.last = 1;
8808 r = r600_bytecode_add_alu(ctx->bc, &alu);
8809 if (r)
8810 return r;
8811 }
8812 return 0;
8813 }
8814
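/* SSBO load: a plain VFETCH from the buffer resource, with the fetch
 * format (one to four 32-bit channels) picked from the write mask. */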
8815 static int tgsi_load_buffer(struct r600_shader_ctx *ctx)
8816 {
8817 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8818 /* work out the dword offset into the buffer for the fetch */
8819 struct r600_bytecode_vtx vtx;
8820 struct r600_bytecode_cf *cf;
8821 int r;
8822 int temp_reg = r600_get_temp(ctx);
8823 unsigned rat_index_mode;
8824 unsigned base;
8825
8826 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8827 base = R600_IMAGE_REAL_RESOURCE_OFFSET + ctx->info.file_count[TGSI_FILE_IMAGE];
8828
8829 r = load_buffer_coord(ctx, 1, temp_reg);
8830 if (r)
8831 return r;
8832 ctx->bc->cf_last->barrier = 1;
8833 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8834 vtx.op = FETCH_OP_VFETCH;
8835 vtx.buffer_id = inst->Src[0].Register.Index + base;
8836 vtx.buffer_index_mode = rat_index_mode;
8837 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8838 vtx.src_gpr = temp_reg;
8839 vtx.src_sel_x = 0;
8840 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8841 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
8842 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
8843 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
8844 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
8845 vtx.num_format_all = 1;
8846 vtx.format_comp_all = 1;
8847 vtx.srf_mode_all = 0;
8848
8849 if (inst->Dst[0].Register.WriteMask & 8) {
8850 vtx.data_format = FMT_32_32_32_32;
8851 vtx.use_const_fields = 0;
8852 } else if (inst->Dst[0].Register.WriteMask & 4) {
8853 vtx.data_format = FMT_32_32_32;
8854 vtx.use_const_fields = 0;
8855 } else if (inst->Dst[0].Register.WriteMask & 2) {
8856 vtx.data_format = FMT_32_32;
8857 vtx.use_const_fields = 0;
8858 } else {
8859 vtx.data_format = FMT_32;
8860 vtx.use_const_fields = 0;
8861 }
8862
8863 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8864 if (r)
8865 return r;
8866 cf = ctx->bc->cf_last;
8867 cf->barrier = 1;
8868 return 0;
8869 }
8870
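/* Image load: a MEM_RAT NOP_RTN export makes the RAT return the texel
 * into the immediate return buffer; after a WAIT_ACK the value is fetched
 * back with a VFETCH indexed by the thread id. */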
8871 static int tgsi_load_rat(struct r600_shader_ctx *ctx)
8872 {
8873 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8874 /* have to work out the offset into the RAT immediate return buffer */
8875 struct r600_bytecode_vtx vtx;
8876 struct r600_bytecode_cf *cf;
8877 int r;
8878 int idx_gpr;
8879 unsigned format, num_format, format_comp, endian;
8880 const struct util_format_description *desc;
8881 unsigned rat_index_mode;
8882 unsigned immed_base;
8883
8884 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8885
8886 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
8887 r = load_index_src(ctx, 1, &idx_gpr);
8888 if (r)
8889 return r;
8890
8891 if (rat_index_mode)
8892 egcm_load_index_reg(ctx->bc, 1, false);
8893
8894 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8895 cf = ctx->bc->cf_last;
8896
8897 cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
8898 cf->rat.inst = V_RAT_INST_NOP_RTN;
8899 cf->rat.index_mode = rat_index_mode;
8900 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
8901 cf->output.gpr = ctx->thread_id_gpr;
8902 cf->output.index_gpr = idx_gpr;
8903 cf->output.comp_mask = 0xf;
8904 cf->output.burst_count = 1;
8905 cf->vpm = 1;
8906 cf->barrier = 1;
8907 cf->mark = 1;
8908 cf->output.elem_size = 0;
8909
8910 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
8911 cf = ctx->bc->cf_last;
8912 cf->barrier = 1;
8913
8914 desc = util_format_description(inst->Memory.Format);
8915 r600_vertex_data_type(inst->Memory.Format,
8916 &format, &num_format, &format_comp, &endian);
8917 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8918 vtx.op = FETCH_OP_VFETCH;
8919 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
8920 vtx.buffer_index_mode = rat_index_mode;
8921 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8922 vtx.src_gpr = ctx->thread_id_gpr;
8923 vtx.src_sel_x = 1;
8924 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8925 vtx.dst_sel_x = desc->swizzle[0];
8926 vtx.dst_sel_y = desc->swizzle[1];
8927 vtx.dst_sel_z = desc->swizzle[2];
8928 vtx.dst_sel_w = desc->swizzle[3];
8929 vtx.srf_mode_all = 1;
8930 vtx.data_format = format;
8931 vtx.num_format_all = num_format;
8932 vtx.format_comp_all = format_comp;
8933 vtx.endian = endian;
8934 vtx.offset = 0;
8935 vtx.mega_fetch_count = 3;
8936 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8937 if (r)
8938 return r;
8939 cf = ctx->bc->cf_last;
8940 cf->barrier = 1;
8941 return 0;
8942 }
8943
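/* Shared-memory load: copy the byte address into a temp and let
 * do_lds_fetch_values emit the LDS reads for the write mask. */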
8944 static int tgsi_load_lds(struct r600_shader_ctx *ctx)
8945 {
8946 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8947 struct r600_bytecode_alu alu;
8948 int r;
8949 int temp_reg = r600_get_temp(ctx);
8950
8951 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8952 alu.op = ALU_OP1_MOV;
8953 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
8954 alu.dst.sel = temp_reg;
8955 alu.dst.write = 1;
8956 alu.last = 1;
8957 r = r600_bytecode_add_alu(ctx->bc, &alu);
8958 if (r)
8959 return r;
8960
8961 r = do_lds_fetch_values(ctx, temp_reg,
8962 ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index, inst->Dst[0].Register.WriteMask);
8963 if (r)
8964 return r;
8965 return 0;
8966 }
8967
8968 static int tgsi_load(struct r600_shader_ctx *ctx)
8969 {
8970 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8971 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
8972 return tgsi_load_rat(ctx);
8973 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
8974 return tgsi_load_gds(ctx);
8975 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
8976 return tgsi_load_buffer(ctx);
8977 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
8978 return tgsi_load_lds(ctx);
8979 return 0;
8980 }
8981
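/* SSBO store: written one dword per enabled channel, each as its own
 * MEM_RAT STORE_TYPED with the dword index in temp_reg and the value
 * staged in ctx->temp_reg. */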
8982 static int tgsi_store_buffer_rat(struct r600_shader_ctx *ctx)
8983 {
8984 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8985 struct r600_bytecode_cf *cf;
8986 int r, i;
8987 unsigned rat_index_mode;
8988 int lasti;
8989 int temp_reg = r600_get_temp(ctx), treg2 = r600_get_temp(ctx);
8990
8991 r = load_buffer_coord(ctx, 0, treg2);
8992 if (r)
8993 return r;
8994
8995 rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8996 if (rat_index_mode)
8997 egcm_load_index_reg(ctx->bc, 1, false);
8998
8999 for (i = 0; i <= 3; i++) {
9000 struct r600_bytecode_alu alu;
9001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9002 alu.op = ALU_OP1_MOV;
9003 alu.dst.sel = temp_reg;
9004 alu.dst.chan = i;
9005 alu.src[0].sel = V_SQ_ALU_SRC_0;
9006 alu.last = (i == 3);
9007 alu.dst.write = 1;
9008 r = r600_bytecode_add_alu(ctx->bc, &alu);
9009 if (r)
9010 return r;
9011 }
9012
9013 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9014 for (i = 0; i <= lasti; i++) {
9015 struct r600_bytecode_alu alu;
9016 if (!((1 << i) & inst->Dst[0].Register.WriteMask))
9017 continue;
9018
9019 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
9020 temp_reg, 0,
9021 treg2, 0,
9022 V_SQ_ALU_SRC_LITERAL, i);
9023 if (r)
9024 return r;
9025
9026 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9027 alu.op = ALU_OP1_MOV;
9028 alu.dst.sel = ctx->temp_reg;
9029 alu.dst.chan = 0;
9030
9031 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
9032 alu.last = 1;
9033 alu.dst.write = 1;
9034 r = r600_bytecode_add_alu(ctx->bc, &alu);
9035 if (r)
9036 return r;
9037
9038 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
9039 cf = ctx->bc->cf_last;
9040
9041 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index + ctx->info.file_count[TGSI_FILE_IMAGE];
9042 cf->rat.inst = V_RAT_INST_STORE_TYPED;
9043 cf->rat.index_mode = rat_index_mode;
9044 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
9045 cf->output.gpr = ctx->temp_reg;
9046 cf->output.index_gpr = temp_reg;
9047 cf->output.comp_mask = 1;
9048 cf->output.burst_count = 1;
9049 cf->vpm = 1;
9050 cf->barrier = 1;
9051 cf->output.elem_size = 0;
9052 }
9053 return 0;
9054 }
9055
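/* Image store: load the source value into GPRs if necessary and emit a
 * single MEM_RAT STORE_TYPED covering all four components. */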
9056 static int tgsi_store_rat(struct r600_shader_ctx *ctx)
9057 {
9058 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9059 struct r600_bytecode_cf *cf;
9060 bool src_requires_loading = false;
9061 int val_gpr, idx_gpr;
9062 int r, i;
9063 unsigned rat_index_mode;
9064
9065 rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
9066
9067 r = load_index_src(ctx, 0, &idx_gpr);
9068 if (r)
9069 return r;
9070
9071 if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY)
9072 src_requires_loading = true;
9073
9074 if (src_requires_loading) {
9075 struct r600_bytecode_alu alu;
9076 for (i = 0; i < 4; i++) {
9077 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9078 alu.op = ALU_OP1_MOV;
9079 alu.dst.sel = ctx->temp_reg;
9080 alu.dst.chan = i;
9081
9082 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
9083 if (i == 3)
9084 alu.last = 1;
9085 alu.dst.write = 1;
9086 r = r600_bytecode_add_alu(ctx->bc, &alu);
9087 if (r)
9088 return r;
9089 }
9090 val_gpr = ctx->temp_reg;
9091 } else
9092 val_gpr = tgsi_tex_get_src_gpr(ctx, 1);
9093 if (rat_index_mode)
9094 egcm_load_index_reg(ctx->bc, 1, false);
9095
9096 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
9097 cf = ctx->bc->cf_last;
9098
9099 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index;
9100 cf->rat.inst = V_RAT_INST_STORE_TYPED;
9101 cf->rat.index_mode = rat_index_mode;
9102 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
9103 cf->output.gpr = val_gpr;
9104 cf->output.index_gpr = idx_gpr;
9105 cf->output.comp_mask = 0xf;
9106 cf->output.burst_count = 1;
9107 cf->vpm = 1;
9108 cf->barrier = 1;
9109 cf->output.elem_size = 0;
9110 return 0;
9111 }
9112
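/* Shared-memory store: temp_reg.x holds the base byte address and every
 * further enabled channel gets base + 4*i; adjacent channel pairs are
 * merged into one LDS_WRITE_REL, the rest use single LDS_WRITEs. */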
9113 static int tgsi_store_lds(struct r600_shader_ctx *ctx)
9114 {
9115 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9116 struct r600_bytecode_alu alu;
9117 int r, i, lasti;
9118 int write_mask = inst->Dst[0].Register.WriteMask;
9119 int temp_reg = r600_get_temp(ctx);
9120
9121 /* LDS write */
9122 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9123 alu.op = ALU_OP1_MOV;
9124 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9125 alu.dst.sel = temp_reg;
9126 alu.dst.write = 1;
9127 alu.last = 1;
9128 r = r600_bytecode_add_alu(ctx->bc, &alu);
9129 if (r)
9130 return r;
9131
9132 lasti = tgsi_last_instruction(write_mask);
9133 for (i = 1; i <= lasti; i++) {
9134 if (!(write_mask & (1 << i)))
9135 continue;
9136 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
9137 temp_reg, i,
9138 temp_reg, 0,
9139 V_SQ_ALU_SRC_LITERAL, 4 * i);
9140 if (r)
9141 return r;
9142 }
9143 for (i = 0; i <= lasti; i++) {
9144 if (!(write_mask & (1 << i)))
9145 continue;
9146
9147 if ((i == 0 && ((write_mask & 3) == 3)) ||
9148 (i == 2 && ((write_mask & 0xc) == 0xc))) {
9149 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9150 alu.op = LDS_OP3_LDS_WRITE_REL;
9151
9152 alu.src[0].sel = temp_reg;
9153 alu.src[0].chan = i;
9154 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9155 r600_bytecode_src(&alu.src[2], &ctx->src[1], i + 1);
9156 alu.last = 1;
9157 alu.is_lds_idx_op = true;
9158 alu.lds_idx = 1;
9159 r = r600_bytecode_add_alu(ctx->bc, &alu);
9160 if (r)
9161 return r;
9162 i += 1;
9163 continue;
9164 }
9165 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9166 alu.op = LDS_OP2_LDS_WRITE;
9167
9168 alu.src[0].sel = temp_reg;
9169 alu.src[0].chan = i;
9170 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9171
9172 alu.last = 1;
9173 alu.is_lds_idx_op = true;
9174
9175 r = r600_bytecode_add_alu(ctx->bc, &alu);
9176 if (r)
9177 return r;
9178 }
9179 return 0;
9180 }
9181
9182 static int tgsi_store(struct r600_shader_ctx *ctx)
9183 {
9184 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9185 if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER)
9186 return tgsi_store_buffer_rat(ctx);
9187 else if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY)
9188 return tgsi_store_lds(ctx);
9189 else
9190 return tgsi_store_rat(ctx);
9191 }
9192
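/* Image/buffer atomics: stage the source value (and, for CMPXCHG, the
 * compare value) in thread_id_gpr, run the RAT atomic with return, wait
 * for the ack and fetch the old value back from the return buffer. */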
9193 static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx)
9194 {
9195 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9196 /* have to work out the offset into the RAT immediate return buffer */
9197 struct r600_bytecode_alu alu;
9198 struct r600_bytecode_vtx vtx;
9199 struct r600_bytecode_cf *cf;
9200 int r;
9201 int idx_gpr;
9202 unsigned format, num_format, format_comp, endian;
9203 const struct util_format_description *desc;
9204 unsigned rat_index_mode;
9205 unsigned immed_base;
9206 unsigned rat_base;
9207
9208 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
9209 rat_base = ctx->shader->rat_base;
9210
9211 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
9212 immed_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9213 rat_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9214
9215 r = load_buffer_coord(ctx, 1, ctx->temp_reg);
9216 if (r)
9217 return r;
9218 idx_gpr = ctx->temp_reg;
9219 } else {
9220 r = load_index_src(ctx, 1, &idx_gpr);
9221 if (r)
9222 return r;
9223 }
9224
9225 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
9226
9227 if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) {
9228 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9229 alu.op = ALU_OP1_MOV;
9230 alu.dst.sel = ctx->thread_id_gpr;
9231 alu.dst.chan = 0;
9232 alu.dst.write = 1;
9233 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
9234 alu.last = 1;
9235 r = r600_bytecode_add_alu(ctx->bc, &alu);
9236 if (r)
9237 return r;
9238
9239 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9240 alu.op = ALU_OP1_MOV;
9241 alu.dst.sel = ctx->thread_id_gpr;
9242 if (ctx->bc->chip_class == CAYMAN)
9243 alu.dst.chan = 2;
9244 else
9245 alu.dst.chan = 3;
9246 alu.dst.write = 1;
9247 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9248 alu.last = 1;
9249 r = r600_bytecode_add_alu(ctx->bc, &alu);
9250 if (r)
9251 return r;
9252 } else {
9253 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9254 alu.op = ALU_OP1_MOV;
9255 alu.dst.sel = ctx->thread_id_gpr;
9256 alu.dst.chan = 0;
9257 alu.dst.write = 1;
9258 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9259 alu.last = 1;
9260 r = r600_bytecode_add_alu(ctx->bc, &alu);
9261 if (r)
9262 return r;
9263 }
9264
9265 if (rat_index_mode)
9266 egcm_load_index_reg(ctx->bc, 1, false);
9267 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
9268 cf = ctx->bc->cf_last;
9269
9270 cf->rat.id = rat_base + inst->Src[0].Register.Index;
9271 cf->rat.inst = ctx->inst_info->op;
9272 cf->rat.index_mode = rat_index_mode;
9273 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
9274 cf->output.gpr = ctx->thread_id_gpr;
9275 cf->output.index_gpr = idx_gpr;
9276 cf->output.comp_mask = 0xf;
9277 cf->output.burst_count = 1;
9278 cf->vpm = 1;
9279 cf->barrier = 1;
9280 cf->mark = 1;
9281 cf->output.elem_size = 0;
9282 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
9283 cf = ctx->bc->cf_last;
9284 cf->barrier = 1;
9285 cf->cf_addr = 1;
9286
9287 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
9288 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
9289 desc = util_format_description(inst->Memory.Format);
9290 r600_vertex_data_type(inst->Memory.Format,
9291 &format, &num_format, &format_comp, &endian);
9292 vtx.dst_sel_x = desc->swizzle[0];
9293 } else {
9294 format = FMT_32;
9295 num_format = 1;
9296 format_comp = 0;
9297 endian = 0;
9298 vtx.dst_sel_x = 0;
9299 }
9300 vtx.op = FETCH_OP_VFETCH;
9301 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
9302 vtx.buffer_index_mode = rat_index_mode;
9303 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
9304 vtx.src_gpr = ctx->thread_id_gpr;
9305 vtx.src_sel_x = 1;
9306 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9307 vtx.dst_sel_y = 7;
9308 vtx.dst_sel_z = 7;
9309 vtx.dst_sel_w = 7;
9310 vtx.use_const_fields = 0;
9311 vtx.srf_mode_all = 1;
9312 vtx.data_format = format;
9313 vtx.num_format_all = num_format;
9314 vtx.format_comp_all = format_comp;
9315 vtx.endian = endian;
9316 vtx.offset = 0;
9317 vtx.mega_fetch_count = 0xf;
9318 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
9319 if (r)
9320 return r;
9321 cf = ctx->bc->cf_last;
9322 cf->vpm = 1;
9323 cf->barrier = 1;
9324 return 0;
9325 }
9326
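/* Map TGSI atomic opcodes onto the corresponding GDS *_RET opcodes. */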
9327 static int get_gds_op(int opcode)
9328 {
9329 switch (opcode) {
9330 case TGSI_OPCODE_ATOMUADD:
9331 return FETCH_OP_GDS_ADD_RET;
9332 case TGSI_OPCODE_ATOMAND:
9333 return FETCH_OP_GDS_AND_RET;
9334 case TGSI_OPCODE_ATOMOR:
9335 return FETCH_OP_GDS_OR_RET;
9336 case TGSI_OPCODE_ATOMXOR:
9337 return FETCH_OP_GDS_XOR_RET;
9338 case TGSI_OPCODE_ATOMUMIN:
9339 return FETCH_OP_GDS_MIN_UINT_RET;
9340 case TGSI_OPCODE_ATOMUMAX:
9341 return FETCH_OP_GDS_MAX_UINT_RET;
9342 case TGSI_OPCODE_ATOMXCHG:
9343 return FETCH_OP_GDS_XCHG_RET;
9344 case TGSI_OPCODE_ATOMCAS:
9345 return FETCH_OP_GDS_CMP_XCHG_RET;
9346 default:
9347 return -1;
9348 }
9349 }
9350
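/* Atomic-counter operation through GDS. Note the ADD special case below:
 * a negative immediate operand is rewritten as GDS_SUB_RET with its
 * absolute value. */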
9351 static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx)
9352 {
9353 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9354 struct r600_bytecode_gds gds;
9355 struct r600_bytecode_alu alu;
9356 int gds_op = get_gds_op(inst->Instruction.Opcode);
9357 int r;
9358 int uav_id = 0;
9359 int uav_index_mode = 0;
9360 bool is_cm = (ctx->bc->chip_class == CAYMAN);
9361
9362 if (gds_op == -1) {
9363 fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode);
9364 return -1;
9365 }
9366
9367 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
9368 if (r)
9369 return r;
9370
9371 if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET) {
9372 if (inst->Src[3].Register.File == TGSI_FILE_IMMEDIATE) {
9373 int value = (ctx->literals[4 * inst->Src[3].Register.Index + inst->Src[3].Register.SwizzleX]);
9374 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9375 alu.op = ALU_OP1_MOV;
9376 alu.dst.sel = ctx->temp_reg;
9377 alu.dst.chan = is_cm ? 2 : 1;
9378 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
9379 alu.src[0].value = value;
9380 alu.last = 1;
9381 alu.dst.write = 1;
9382 r = r600_bytecode_add_alu(ctx->bc, &alu);
9383 if (r)
9384 return r;
9385 } else {
9386 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9387 alu.op = ALU_OP1_MOV;
9388 alu.dst.sel = ctx->temp_reg;
9389 alu.dst.chan = is_cm ? 2 : 1;
9390 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
9391 alu.last = 1;
9392 alu.dst.write = 1;
9393 r = r600_bytecode_add_alu(ctx->bc, &alu);
9394 if (r)
9395 return r;
9396 }
9397 }
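	/* an ADD of a negative immediate is rewritten below as a SUB of the
	 * absolute value */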
9398 if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) {
9399 int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]);
9400 int abs_value = abs(value);
9401 if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET)
9402 gds_op = FETCH_OP_GDS_SUB_RET;
9403 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9404 alu.op = ALU_OP1_MOV;
9405 alu.dst.sel = ctx->temp_reg;
9406 alu.dst.chan = is_cm ? 1 : 0;
9407 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
9408 alu.src[0].value = abs_value;
9409 alu.last = 1;
9410 alu.dst.write = 1;
9411 r = r600_bytecode_add_alu(ctx->bc, &alu);
9412 if (r)
9413 return r;
9414 } else {
9415 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9416 alu.op = ALU_OP1_MOV;
9417 alu.dst.sel = ctx->temp_reg;
9418 alu.dst.chan = is_cm ? 1 : 0;
9419 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9420 alu.last = 1;
9421 alu.dst.write = 1;
9422 r = r600_bytecode_add_alu(ctx->bc, &alu);
9423 if (r)
9424 return r;
9425 }
9426
9427
9428 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
9429 gds.op = gds_op;
9430 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9431 gds.uav_id = is_cm ? 0 : uav_id;
9432 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
9433 gds.src_gpr = ctx->temp_reg;
9434 gds.src_gpr2 = 0;
9435 gds.src_sel_x = is_cm ? 0 : 4;
9436 gds.src_sel_y = is_cm ? 1 : 0;
9437 if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET)
9438 gds.src_sel_z = is_cm ? 2 : 1;
9439 else
9440 gds.src_sel_z = 7;
9441 gds.dst_sel_x = 0;
9442 gds.dst_sel_y = 7;
9443 gds.dst_sel_z = 7;
9444 gds.dst_sel_w = 7;
9445 gds.alloc_consume = !is_cm;
9446
9447 r = r600_bytecode_add_gds(ctx->bc, &gds);
9448 if (r)
9449 return r;
9450 ctx->bc->cf_last->vpm = 1;
9451 return 0;
9452 }
9453
9454 static int get_lds_op(int opcode)
9455 {
9456 switch (opcode) {
9457 case TGSI_OPCODE_ATOMUADD:
9458 return LDS_OP2_LDS_ADD_RET;
9459 case TGSI_OPCODE_ATOMAND:
9460 return LDS_OP2_LDS_AND_RET;
9461 case TGSI_OPCODE_ATOMOR:
9462 return LDS_OP2_LDS_OR_RET;
9463 case TGSI_OPCODE_ATOMXOR:
9464 return LDS_OP2_LDS_XOR_RET;
9465 case TGSI_OPCODE_ATOMUMIN:
9466 return LDS_OP2_LDS_MIN_UINT_RET;
9467 case TGSI_OPCODE_ATOMUMAX:
9468 return LDS_OP2_LDS_MAX_UINT_RET;
9469 case TGSI_OPCODE_ATOMIMIN:
9470 return LDS_OP2_LDS_MIN_INT_RET;
9471 case TGSI_OPCODE_ATOMIMAX:
9472 return LDS_OP2_LDS_MAX_INT_RET;
9473 case TGSI_OPCODE_ATOMXCHG:
9474 return LDS_OP2_LDS_XCHG_RET;
9475 case TGSI_OPCODE_ATOMCAS:
9476 return LDS_OP3_LDS_CMP_XCHG_RET;
9477 default:
9478 return -1;
9479 }
9480 }
9481
9482 static int tgsi_atomic_op_lds(struct r600_shader_ctx *ctx)
9483 {
9484 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9485 int lds_op = get_lds_op(inst->Instruction.Opcode);
9486 int r;
9487
9488 struct r600_bytecode_alu alu;
9489 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9490 alu.op = lds_op;
9491 alu.is_lds_idx_op = true;
9492 alu.last = 1;
9493 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
9494 r600_bytecode_src(&alu.src[1], &ctx->src[2], 0);
9495 if (lds_op == LDS_OP3_LDS_CMP_XCHG_RET)
9496 r600_bytecode_src(&alu.src[2], &ctx->src[3], 0);
9497 else
9498 alu.src[2].sel = V_SQ_ALU_SRC_0;
9499 r = r600_bytecode_add_alu(ctx->bc, &alu);
9500 if (r)
9501 return r;
9502
9503 /* then read from LDS_OQ_A_POP */
9504 memset(&alu, 0, sizeof(alu));
9505
9506 alu.op = ALU_OP1_MOV;
9507 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
9508 alu.src[0].chan = 0;
9509 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
9510 alu.dst.write = 1;
9511 alu.last = 1;
9512 r = r600_bytecode_add_alu(ctx->bc, &alu);
9513 if (r)
9514 return r;
9515
9516 return 0;
9517 }
9518
9519 static int tgsi_atomic_op(struct r600_shader_ctx *ctx)
9520 {
9521 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
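	/* dispatch on the resource file: images and buffers go through RAT
	 * atomics, HW atomic counters through GDS, shared memory through LDS */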
9522 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
9523 return tgsi_atomic_op_rat(ctx);
9524 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
9525 return tgsi_atomic_op_gds(ctx);
9526 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
9527 return tgsi_atomic_op_rat(ctx);
9528 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
9529 return tgsi_atomic_op_lds(ctx);
9530 return 0;
9531 }
9532
9533 static int tgsi_resq(struct r600_shader_ctx *ctx)
9534 {
9535 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9536 unsigned sampler_index_mode;
9537 struct r600_bytecode_tex tex;
9538 int r;
9539 boolean has_txq_cube_array_z = false;
9540
9541 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
9542 (inst->Src[0].Register.File == TGSI_FILE_IMAGE && inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
9543 if (ctx->bc->chip_class < EVERGREEN)
9544 ctx->shader->uses_tex_buffers = true;
9545 		unsigned eg_buffer_base = R600_IMAGE_REAL_RESOURCE_OFFSET;
9547 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
9548 eg_buffer_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9549 return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset, eg_buffer_base);
9550 }
9551
9552 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY &&
9553 inst->Dst[0].Register.WriteMask & 4) {
9554 ctx->shader->has_txq_cube_array_z_comp = true;
9555 has_txq_cube_array_z = true;
9556 }
9557
9558 sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
9559 if (sampler_index_mode)
9560 egcm_load_index_reg(ctx->bc, 1, false);
9561
9562
9563 	/* does this shader want the number of layers from TXQ for a cube array? */
9564 if (has_txq_cube_array_z) {
9565 int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset;
9566 struct r600_bytecode_alu alu;
9567
9568 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9569 alu.op = ALU_OP1_MOV;
9570
9571 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
9572 		/* with eg each dword is the number of cubes */
9573 alu.src[0].sel += id / 4;
9574 alu.src[0].chan = id % 4;
9575 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
9576 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
9577 alu.last = 1;
9578 r = r600_bytecode_add_alu(ctx->bc, &alu);
9579 if (r)
9580 return r;
9581 /* disable writemask from texture instruction */
9582 inst->Dst[0].Register.WriteMask &= ~4;
9583 }
9584 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
9585 tex.op = ctx->inst_info->op;
9586 tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index;
9587 tex.sampler_index_mode = sampler_index_mode;
9588 tex.resource_id = tex.sampler_id;
9589 tex.resource_index_mode = sampler_index_mode;
9590 tex.src_sel_x = 4;
9591 tex.src_sel_y = 4;
9592 tex.src_sel_z = 4;
9593 tex.src_sel_w = 4;
9594 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
9595 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
9596 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
9597 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
9598 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9599 r = r600_bytecode_add_tex(ctx->bc, &tex);
9600 if (r)
9601 return r;
9602
9603 return 0;
9604 }
9605
9606 static int tgsi_lrp(struct r600_shader_ctx *ctx)
9607 {
9608 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9609 struct r600_bytecode_alu alu;
9610 unsigned lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9611 struct r600_bytecode_alu_src srcs[2][4];
9612 unsigned i;
9613 int r;
9614
9615 	/* optimize if it's just an equal balance (lerp weight of exactly 0.5) */
9616 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
9617 for (i = 0; i < lasti + 1; i++) {
9618 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9619 continue;
9620
9621 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9622 alu.op = ALU_OP2_ADD;
9623 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
9624 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
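			/* omod 3 divides the result by two: dst = (src1 + src2) / 2 */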
9625 alu.omod = 3;
9626 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9627 alu.dst.chan = i;
9628 if (i == lasti) {
9629 alu.last = 1;
9630 }
9631 r = r600_bytecode_add_alu(ctx->bc, &alu);
9632 if (r)
9633 return r;
9634 }
9635 return 0;
9636 }
9637
9638 /* 1 - src0 */
9639 for (i = 0; i < lasti + 1; i++) {
9640 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9641 continue;
9642
9643 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9644 alu.op = ALU_OP2_ADD;
9645 alu.src[0].sel = V_SQ_ALU_SRC_1;
9646 alu.src[0].chan = 0;
9647 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
9648 r600_bytecode_src_toggle_neg(&alu.src[1]);
9649 alu.dst.sel = ctx->temp_reg;
9650 alu.dst.chan = i;
9651 if (i == lasti) {
9652 alu.last = 1;
9653 }
9654 alu.dst.write = 1;
9655 r = r600_bytecode_add_alu(ctx->bc, &alu);
9656 if (r)
9657 return r;
9658 }
9659
9660 /* (1 - src0) * src2 */
9661 for (i = 0; i < lasti + 1; i++) {
9662 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9663 continue;
9664
9665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9666 alu.op = ALU_OP2_MUL;
9667 alu.src[0].sel = ctx->temp_reg;
9668 alu.src[0].chan = i;
9669 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9670 alu.dst.sel = ctx->temp_reg;
9671 alu.dst.chan = i;
9672 if (i == lasti) {
9673 alu.last = 1;
9674 }
9675 alu.dst.write = 1;
9676 r = r600_bytecode_add_alu(ctx->bc, &alu);
9677 if (r)
9678 return r;
9679 }
9680
9681 /* src0 * src1 + (1 - src0) * src2 */
9682
9683 for (i = 0; i < 2; i++) {
9684 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
9685 srcs[i], &ctx->src[i]);
9686 if (r)
9687 return r;
9688 }
9689
9690 for (i = 0; i < lasti + 1; i++) {
9691 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9692 continue;
9693
9694 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9695 alu.op = ALU_OP3_MULADD;
9696 alu.is_op3 = 1;
9697 alu.src[0] = srcs[0][i];
9698 alu.src[1] = srcs[1][i];
9699 alu.src[2].sel = ctx->temp_reg;
9700 alu.src[2].chan = i;
9701
9702 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9703 alu.dst.chan = i;
9704 if (i == lasti) {
9705 alu.last = 1;
9706 }
9707 r = r600_bytecode_add_alu(ctx->bc, &alu);
9708 if (r)
9709 return r;
9710 }
9711 return 0;
9712 }
9713
9714 static int tgsi_cmp(struct r600_shader_ctx *ctx)
9715 {
9716 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9717 struct r600_bytecode_alu alu;
9718 int i, r, j;
9719 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9720 struct r600_bytecode_alu_src srcs[3][4];
9721
9722 unsigned op;
9723
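	/* TGSI CMP: dst = (src0 < 0) ? src1 : src2.  CNDGE computes
	 * dst = (src0 >= 0) ? a : b, so a/b below are fed src2/src1.  When
	 * src0 carries both abs and neg, -|src0| >= 0 only holds for
	 * src0 == 0, so CNDE on the unmodified value gives the same result. */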
9724 if (ctx->src[0].abs && ctx->src[0].neg) {
9725 op = ALU_OP3_CNDE;
9726 ctx->src[0].abs = 0;
9727 ctx->src[0].neg = 0;
9728 } else {
9729 op = ALU_OP3_CNDGE;
9730 }
9731
9732 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
9733 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
9734 srcs[j], &ctx->src[j]);
9735 if (r)
9736 return r;
9737 }
9738
9739 for (i = 0; i < lasti + 1; i++) {
9740 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9741 continue;
9742
9743 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9744 alu.op = op;
9745 alu.src[0] = srcs[0][i];
9746 alu.src[1] = srcs[2][i];
9747 alu.src[2] = srcs[1][i];
9748
9749 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9750 alu.dst.chan = i;
9751 alu.dst.write = 1;
9752 alu.is_op3 = 1;
9753 if (i == lasti)
9754 alu.last = 1;
9755 r = r600_bytecode_add_alu(ctx->bc, &alu);
9756 if (r)
9757 return r;
9758 }
9759 return 0;
9760 }
9761
9762 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
9763 {
9764 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9765 struct r600_bytecode_alu alu;
9766 int i, r;
9767 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9768
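	/* UCMP: dst = src0 ? src1 : src2, done as CNDE_INT
	 * (dst = src0 == 0 ? a : b) with a/b fed src2/src1 */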
9769 for (i = 0; i < lasti + 1; i++) {
9770 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9771 continue;
9772
9773 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9774 alu.op = ALU_OP3_CNDE_INT;
9775 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9776 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9777 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
9778 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9779 alu.dst.chan = i;
9780 alu.dst.write = 1;
9781 alu.is_op3 = 1;
9782 if (i == lasti)
9783 alu.last = 1;
9784 r = r600_bytecode_add_alu(ctx->bc, &alu);
9785 if (r)
9786 return r;
9787 }
9788 return 0;
9789 }
9790
9791 static int tgsi_exp(struct r600_shader_ctx *ctx)
9792 {
9793 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9794 struct r600_bytecode_alu alu;
9795 int r;
9796 unsigned i;
9797
9798 	/* result.x = 2^floor(src.x); */
9799 if (inst->Dst[0].Register.WriteMask & 1) {
9800 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9801
9802 alu.op = ALU_OP1_FLOOR;
9803 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9804
9805 alu.dst.sel = ctx->temp_reg;
9806 alu.dst.chan = 0;
9807 alu.dst.write = 1;
9808 alu.last = 1;
9809 r = r600_bytecode_add_alu(ctx->bc, &alu);
9810 if (r)
9811 return r;
9812
9813 if (ctx->bc->chip_class == CAYMAN) {
9814 for (i = 0; i < 3; i++) {
9815 alu.op = ALU_OP1_EXP_IEEE;
9816 alu.src[0].sel = ctx->temp_reg;
9817 alu.src[0].chan = 0;
9818
9819 alu.dst.sel = ctx->temp_reg;
9820 alu.dst.chan = i;
9821 alu.dst.write = i == 0;
9822 alu.last = i == 2;
9823 r = r600_bytecode_add_alu(ctx->bc, &alu);
9824 if (r)
9825 return r;
9826 }
9827 } else {
9828 alu.op = ALU_OP1_EXP_IEEE;
9829 alu.src[0].sel = ctx->temp_reg;
9830 alu.src[0].chan = 0;
9831
9832 alu.dst.sel = ctx->temp_reg;
9833 alu.dst.chan = 0;
9834 alu.dst.write = 1;
9835 alu.last = 1;
9836 r = r600_bytecode_add_alu(ctx->bc, &alu);
9837 if (r)
9838 return r;
9839 }
9840 }
9841
9842 	/* result.y = src.x - floor(src.x), i.e. fract(src.x); */
9843 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9844 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9845
9846 alu.op = ALU_OP1_FRACT;
9847 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9848
9849 alu.dst.sel = ctx->temp_reg;
9850 #if 0
9851 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9852 if (r)
9853 return r;
9854 #endif
9855 alu.dst.write = 1;
9856 alu.dst.chan = 1;
9857
9858 alu.last = 1;
9859
9860 r = r600_bytecode_add_alu(ctx->bc, &alu);
9861 if (r)
9862 return r;
9863 }
9864
9865 	/* result.z = RoughApprox2ToX(src.x); */
9866 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
9867 if (ctx->bc->chip_class == CAYMAN) {
9868 for (i = 0; i < 3; i++) {
9869 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9870 alu.op = ALU_OP1_EXP_IEEE;
9871 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9872
9873 alu.dst.sel = ctx->temp_reg;
9874 alu.dst.chan = i;
9875 if (i == 2) {
9876 alu.dst.write = 1;
9877 alu.last = 1;
9878 }
9879
9880 r = r600_bytecode_add_alu(ctx->bc, &alu);
9881 if (r)
9882 return r;
9883 }
9884 } else {
9885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9886 alu.op = ALU_OP1_EXP_IEEE;
9887 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9888
9889 alu.dst.sel = ctx->temp_reg;
9890 alu.dst.write = 1;
9891 alu.dst.chan = 2;
9892
9893 alu.last = 1;
9894
9895 r = r600_bytecode_add_alu(ctx->bc, &alu);
9896 if (r)
9897 return r;
9898 }
9899 }
9900
9901 	/* result.w = 1.0; */
9902 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
9903 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9904
9905 alu.op = ALU_OP1_MOV;
9906 alu.src[0].sel = V_SQ_ALU_SRC_1;
9907 alu.src[0].chan = 0;
9908
9909 alu.dst.sel = ctx->temp_reg;
9910 alu.dst.chan = 3;
9911 alu.dst.write = 1;
9912 alu.last = 1;
9913 r = r600_bytecode_add_alu(ctx->bc, &alu);
9914 if (r)
9915 return r;
9916 }
9917 return tgsi_helper_copy(ctx, inst);
9918 }
9919
9920 static int tgsi_log(struct r600_shader_ctx *ctx)
9921 {
9922 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9923 struct r600_bytecode_alu alu;
9924 int r;
9925 unsigned i;
9926
9927 	/* result.x = floor(log2(|src.x|)); */
9928 if (inst->Dst[0].Register.WriteMask & 1) {
9929 if (ctx->bc->chip_class == CAYMAN) {
9930 for (i = 0; i < 3; i++) {
9931 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9932
9933 alu.op = ALU_OP1_LOG_IEEE;
9934 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9935 r600_bytecode_src_set_abs(&alu.src[0]);
9936
9937 alu.dst.sel = ctx->temp_reg;
9938 alu.dst.chan = i;
9939 if (i == 0)
9940 alu.dst.write = 1;
9941 if (i == 2)
9942 alu.last = 1;
9943 r = r600_bytecode_add_alu(ctx->bc, &alu);
9944 if (r)
9945 return r;
9946 }
9947
9948 } else {
9949 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9950
9951 alu.op = ALU_OP1_LOG_IEEE;
9952 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9953 r600_bytecode_src_set_abs(&alu.src[0]);
9954
9955 alu.dst.sel = ctx->temp_reg;
9956 alu.dst.chan = 0;
9957 alu.dst.write = 1;
9958 alu.last = 1;
9959 r = r600_bytecode_add_alu(ctx->bc, &alu);
9960 if (r)
9961 return r;
9962 }
9963
9964 alu.op = ALU_OP1_FLOOR;
9965 alu.src[0].sel = ctx->temp_reg;
9966 alu.src[0].chan = 0;
9967
9968 alu.dst.sel = ctx->temp_reg;
9969 alu.dst.chan = 0;
9970 alu.dst.write = 1;
9971 alu.last = 1;
9972
9973 r = r600_bytecode_add_alu(ctx->bc, &alu);
9974 if (r)
9975 return r;
9976 }
9977
9978 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
9979 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9980
9981 if (ctx->bc->chip_class == CAYMAN) {
9982 for (i = 0; i < 3; i++) {
9983 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9984
9985 alu.op = ALU_OP1_LOG_IEEE;
9986 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9987 r600_bytecode_src_set_abs(&alu.src[0]);
9988
9989 alu.dst.sel = ctx->temp_reg;
9990 alu.dst.chan = i;
9991 if (i == 1)
9992 alu.dst.write = 1;
9993 if (i == 2)
9994 alu.last = 1;
9995
9996 r = r600_bytecode_add_alu(ctx->bc, &alu);
9997 if (r)
9998 return r;
9999 }
10000 } else {
10001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10002
10003 alu.op = ALU_OP1_LOG_IEEE;
10004 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10005 r600_bytecode_src_set_abs(&alu.src[0]);
10006
10007 alu.dst.sel = ctx->temp_reg;
10008 alu.dst.chan = 1;
10009 alu.dst.write = 1;
10010 alu.last = 1;
10011
10012 r = r600_bytecode_add_alu(ctx->bc, &alu);
10013 if (r)
10014 return r;
10015 }
10016
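		/* temp.y = floor(log2(|src.x|)) */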
10017 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10018
10019 alu.op = ALU_OP1_FLOOR;
10020 alu.src[0].sel = ctx->temp_reg;
10021 alu.src[0].chan = 1;
10022
10023 alu.dst.sel = ctx->temp_reg;
10024 alu.dst.chan = 1;
10025 alu.dst.write = 1;
10026 alu.last = 1;
10027
10028 r = r600_bytecode_add_alu(ctx->bc, &alu);
10029 if (r)
10030 return r;
10031
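		/* temp.y = 2 ^ floor(log2(|src.x|)) */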
10032 if (ctx->bc->chip_class == CAYMAN) {
10033 for (i = 0; i < 3; i++) {
10034 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10035 alu.op = ALU_OP1_EXP_IEEE;
10036 alu.src[0].sel = ctx->temp_reg;
10037 alu.src[0].chan = 1;
10038
10039 alu.dst.sel = ctx->temp_reg;
10040 alu.dst.chan = i;
10041 if (i == 1)
10042 alu.dst.write = 1;
10043 if (i == 2)
10044 alu.last = 1;
10045
10046 r = r600_bytecode_add_alu(ctx->bc, &alu);
10047 if (r)
10048 return r;
10049 }
10050 } else {
10051 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10052 alu.op = ALU_OP1_EXP_IEEE;
10053 alu.src[0].sel = ctx->temp_reg;
10054 alu.src[0].chan = 1;
10055
10056 alu.dst.sel = ctx->temp_reg;
10057 alu.dst.chan = 1;
10058 alu.dst.write = 1;
10059 alu.last = 1;
10060
10061 r = r600_bytecode_add_alu(ctx->bc, &alu);
10062 if (r)
10063 return r;
10064 }
10065
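		/* temp.y = 1 / (2 ^ floor(log2(|src.x|))) */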
10066 if (ctx->bc->chip_class == CAYMAN) {
10067 for (i = 0; i < 3; i++) {
10068 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10069 alu.op = ALU_OP1_RECIP_IEEE;
10070 alu.src[0].sel = ctx->temp_reg;
10071 alu.src[0].chan = 1;
10072
10073 alu.dst.sel = ctx->temp_reg;
10074 alu.dst.chan = i;
10075 if (i == 1)
10076 alu.dst.write = 1;
10077 if (i == 2)
10078 alu.last = 1;
10079
10080 r = r600_bytecode_add_alu(ctx->bc, &alu);
10081 if (r)
10082 return r;
10083 }
10084 } else {
10085 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10086 alu.op = ALU_OP1_RECIP_IEEE;
10087 alu.src[0].sel = ctx->temp_reg;
10088 alu.src[0].chan = 1;
10089
10090 alu.dst.sel = ctx->temp_reg;
10091 alu.dst.chan = 1;
10092 alu.dst.write = 1;
10093 alu.last = 1;
10094
10095 r = r600_bytecode_add_alu(ctx->bc, &alu);
10096 if (r)
10097 return r;
10098 }
10099
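		/* result.y = |src.x| * temp.y */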
10100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10101
10102 alu.op = ALU_OP2_MUL;
10103
10104 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10105 r600_bytecode_src_set_abs(&alu.src[0]);
10106
10107 alu.src[1].sel = ctx->temp_reg;
10108 alu.src[1].chan = 1;
10109
10110 alu.dst.sel = ctx->temp_reg;
10111 alu.dst.chan = 1;
10112 alu.dst.write = 1;
10113 alu.last = 1;
10114
10115 r = r600_bytecode_add_alu(ctx->bc, &alu);
10116 if (r)
10117 return r;
10118 }
10119
10120 	/* result.z = log2(|src.x|); */
10121 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
10122 if (ctx->bc->chip_class == CAYMAN) {
10123 for (i = 0; i < 3; i++) {
10124 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10125
10126 alu.op = ALU_OP1_LOG_IEEE;
10127 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10128 r600_bytecode_src_set_abs(&alu.src[0]);
10129
10130 alu.dst.sel = ctx->temp_reg;
10131 if (i == 2)
10132 alu.dst.write = 1;
10133 alu.dst.chan = i;
10134 if (i == 2)
10135 alu.last = 1;
10136
10137 r = r600_bytecode_add_alu(ctx->bc, &alu);
10138 if (r)
10139 return r;
10140 }
10141 } else {
10142 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10143
10144 alu.op = ALU_OP1_LOG_IEEE;
10145 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10146 r600_bytecode_src_set_abs(&alu.src[0]);
10147
10148 alu.dst.sel = ctx->temp_reg;
10149 alu.dst.write = 1;
10150 alu.dst.chan = 2;
10151 alu.last = 1;
10152
10153 r = r600_bytecode_add_alu(ctx->bc, &alu);
10154 if (r)
10155 return r;
10156 }
10157 }
10158
10159 /* result.w = 1.0; */
10160 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
10161 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10162
10163 alu.op = ALU_OP1_MOV;
10164 alu.src[0].sel = V_SQ_ALU_SRC_1;
10165 alu.src[0].chan = 0;
10166
10167 alu.dst.sel = ctx->temp_reg;
10168 alu.dst.chan = 3;
10169 alu.dst.write = 1;
10170 alu.last = 1;
10171
10172 r = r600_bytecode_add_alu(ctx->bc, &alu);
10173 if (r)
10174 return r;
10175 }
10176
10177 return tgsi_helper_copy(ctx, inst);
10178 }
10179
10180 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
10181 {
10182 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10183 struct r600_bytecode_alu alu;
10184 int r;
10185 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10186 unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
10187
10188 assert(inst->Dst[0].Register.Index < 3);
10189 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10190
10191 switch (inst->Instruction.Opcode) {
10192 case TGSI_OPCODE_ARL:
10193 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
10194 break;
10195 case TGSI_OPCODE_ARR:
10196 alu.op = ALU_OP1_FLT_TO_INT;
10197 break;
10198 case TGSI_OPCODE_UARL:
10199 alu.op = ALU_OP1_MOV;
10200 break;
10201 default:
10202 assert(0);
10203 return -1;
10204 }
10205
10206 for (i = 0; i <= lasti; ++i) {
10207 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10208 continue;
10209 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10210 alu.last = i == lasti;
10211 alu.dst.sel = reg;
10212 alu.dst.chan = i;
10213 alu.dst.write = 1;
10214 r = r600_bytecode_add_alu(ctx->bc, &alu);
10215 if (r)
10216 return r;
10217 }
10218
10219 if (inst->Dst[0].Register.Index > 0)
10220 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
10221 else
10222 ctx->bc->ar_loaded = 0;
10223
10224 return 0;
10225 }
10226 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
10227 {
10228 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10229 struct r600_bytecode_alu alu;
10230 int r;
10231 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10232
10233 switch (inst->Instruction.Opcode) {
10234 case TGSI_OPCODE_ARL:
10235 memset(&alu, 0, sizeof(alu));
10236 alu.op = ALU_OP1_FLOOR;
10237 alu.dst.sel = ctx->bc->ar_reg;
10238 alu.dst.write = 1;
10239 for (i = 0; i <= lasti; ++i) {
10240 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10241 alu.dst.chan = i;
10242 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10243 alu.last = i == lasti;
10244 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10245 return r;
10246 }
10247 }
10248
10249 memset(&alu, 0, sizeof(alu));
10250 alu.op = ALU_OP1_FLT_TO_INT;
10251 alu.src[0].sel = ctx->bc->ar_reg;
10252 alu.dst.sel = ctx->bc->ar_reg;
10253 alu.dst.write = 1;
10254 /* FLT_TO_INT is trans-only on r600/r700 */
10255 alu.last = TRUE;
10256 for (i = 0; i <= lasti; ++i) {
10257 alu.dst.chan = i;
10258 alu.src[0].chan = i;
10259 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10260 return r;
10261 }
10262 break;
10263 case TGSI_OPCODE_ARR:
10264 memset(&alu, 0, sizeof(alu));
10265 alu.op = ALU_OP1_FLT_TO_INT;
10266 alu.dst.sel = ctx->bc->ar_reg;
10267 alu.dst.write = 1;
10268 /* FLT_TO_INT is trans-only on r600/r700 */
10269 alu.last = TRUE;
10270 for (i = 0; i <= lasti; ++i) {
10271 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10272 alu.dst.chan = i;
10273 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10274 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10275 return r;
10276 }
10277 }
10278 break;
10279 case TGSI_OPCODE_UARL:
10280 memset(&alu, 0, sizeof(alu));
10281 alu.op = ALU_OP1_MOV;
10282 alu.dst.sel = ctx->bc->ar_reg;
10283 alu.dst.write = 1;
10284 for (i = 0; i <= lasti; ++i) {
10285 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10286 alu.dst.chan = i;
10287 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10288 alu.last = i == lasti;
10289 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10290 return r;
10291 }
10292 }
10293 break;
10294 default:
10295 assert(0);
10296 return -1;
10297 }
10298
10299 ctx->bc->ar_loaded = 0;
10300 return 0;
10301 }
10302
10303 static int tgsi_opdst(struct r600_shader_ctx *ctx)
10304 {
10305 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10306 struct r600_bytecode_alu alu;
10307 int i, r = 0;
10308
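	/* DST: dst = (1, src0.y * src1.y, src0.z, src1.w); emitted as four
	 * MULs with V_SQ_ALU_SRC_1 substituted for the unused operand in
	 * each channel */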
10309 for (i = 0; i < 4; i++) {
10310 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10311
10312 alu.op = ALU_OP2_MUL;
10313 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10314
10315 if (i == 0 || i == 3) {
10316 alu.src[0].sel = V_SQ_ALU_SRC_1;
10317 } else {
10318 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10319 }
10320
10321 if (i == 0 || i == 2) {
10322 alu.src[1].sel = V_SQ_ALU_SRC_1;
10323 } else {
10324 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
10325 }
10326 if (i == 3)
10327 alu.last = 1;
10328 r = r600_bytecode_add_alu(ctx->bc, &alu);
10329 if (r)
10330 return r;
10331 }
10332 return 0;
10333 }
10334
10335 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type,
10336 struct r600_bytecode_alu_src *src)
10337 {
10338 struct r600_bytecode_alu alu;
10339 int r;
10340
10341 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10342 alu.op = opcode;
10343 alu.execute_mask = 1;
10344 alu.update_pred = 1;
10345
10346 alu.dst.sel = ctx->temp_reg;
10347 alu.dst.write = 1;
10348 alu.dst.chan = 0;
10349
10350 alu.src[0] = *src;
10351 alu.src[1].sel = V_SQ_ALU_SRC_0;
10352 alu.src[1].chan = 0;
10353
10354 alu.last = 1;
10355
10356 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
10357 if (r)
10358 return r;
10359 return 0;
10360 }
10361
10362 static int pops(struct r600_shader_ctx *ctx, int pops)
10363 {
10364 unsigned force_pop = ctx->bc->force_add_cf;
10365
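	/* try to fold the pops into the preceding ALU clause: ALU_POP_AFTER
	 * pops one level, ALU_POP2_AFTER pops two; deeper pops need an
	 * explicit CF_OP_POP */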
10366 if (!force_pop) {
10367 int alu_pop = 3;
10368 if (ctx->bc->cf_last) {
10369 if (ctx->bc->cf_last->op == CF_OP_ALU)
10370 alu_pop = 0;
10371 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
10372 alu_pop = 1;
10373 }
10374 alu_pop += pops;
10375 if (alu_pop == 1) {
10376 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
10377 ctx->bc->force_add_cf = 1;
10378 } else if (alu_pop == 2) {
10379 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
10380 ctx->bc->force_add_cf = 1;
10381 } else {
10382 force_pop = 1;
10383 }
10384 }
10385
10386 if (force_pop) {
10387 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
10388 ctx->bc->cf_last->pop_count = pops;
10389 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
10390 }
10391
10392 return 0;
10393 }
10394
10395 static inline int callstack_update_max_depth(struct r600_shader_ctx *ctx,
10396 unsigned reason)
10397 {
10398 struct r600_stack_info *stack = &ctx->bc->stack;
10399 unsigned elements;
10400 int entries;
10401
10402 unsigned entry_size = stack->entry_size;
10403
10404 	elements = (stack->loop + stack->push_wqm) * entry_size;
10405 elements += stack->push;
10406
10407 switch (ctx->bc->chip_class) {
10408 case R600:
10409 case R700:
10410 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
10411 * the stack must be reserved to hold the current active/continue
10412 * masks */
10413 if (reason == FC_PUSH_VPM || stack->push > 0) {
10414 elements += 2;
10415 }
10416 break;
10417
10418 case CAYMAN:
10419 /* r9xx: any stack operation on empty stack consumes 2 additional
10420 * elements */
10421 elements += 2;
10422
10423 /* fallthrough */
10424 /* FIXME: do the two elements added above cover the cases for the
10425 * r8xx+ below? */
10426
10427 case EVERGREEN:
10428 /* r8xx+: 2 extra elements are not always required, but one extra
10429 * element must be added for each of the following cases:
10430 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
10431 * stack usage.
10432 * (Currently we don't use ALU_ELSE_AFTER.)
10433 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
10434 * PUSH instruction executed.
10435 *
10436 * NOTE: it seems we also need to reserve additional element in some
10437 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
10438 * then STACK_SIZE should be 2 instead of 1 */
10439 if (reason == FC_PUSH_VPM || stack->push > 0) {
10440 elements += 1;
10441 }
10442 break;
10443
10444 default:
10445 assert(0);
10446 break;
10447 }
10448
10449 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
10450 * for all chips, so we use 4 in the final formula, not the real entry_size
10451 * for the chip */
10452 entry_size = 4;
10453
10454 entries = (elements + (entry_size - 1)) / entry_size;
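	/* e.g. elements == 5 gives entries == (5 + 3) / 4 == 2 */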
10455
10456 if (entries > stack->max_entries)
10457 stack->max_entries = entries;
10458 return elements;
10459 }
10460
10461 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
10462 {
10463 switch(reason) {
10464 case FC_PUSH_VPM:
10465 --ctx->bc->stack.push;
10466 assert(ctx->bc->stack.push >= 0);
10467 break;
10468 case FC_PUSH_WQM:
10469 --ctx->bc->stack.push_wqm;
10470 assert(ctx->bc->stack.push_wqm >= 0);
10471 break;
10472 case FC_LOOP:
10473 --ctx->bc->stack.loop;
10474 assert(ctx->bc->stack.loop >= 0);
10475 break;
10476 default:
10477 assert(0);
10478 break;
10479 }
10480 }
10481
10482 static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
10483 {
10484 switch (reason) {
10485 case FC_PUSH_VPM:
10486 ++ctx->bc->stack.push;
10487 break;
10488 case FC_PUSH_WQM:
10489 ++ctx->bc->stack.push_wqm;
10490 break;
10491 case FC_LOOP:
10492 ++ctx->bc->stack.loop;
10493 break;
10494 default:
10495 assert(0);
10496 }
10497
10498 return callstack_update_max_depth(ctx, reason);
10499 }
10500
10501 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
10502 {
10503 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
10504
10505 sp->mid = realloc((void *)sp->mid,
10506 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
10507 sp->mid[sp->num_mid] = ctx->bc->cf_last;
10508 sp->num_mid++;
10509 }
10510
10511 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
10512 {
10513 assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
10514 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
10515 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
10516 ctx->bc->fc_sp++;
10517 }
10518
10519 static void fc_poplevel(struct r600_shader_ctx *ctx)
10520 {
10521 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
10522 free(sp->mid);
10523 sp->mid = NULL;
10524 sp->num_mid = 0;
10525 sp->start = NULL;
10526 sp->type = 0;
10527 ctx->bc->fc_sp--;
10528 }
10529
10530 #if 0
10531 static int emit_return(struct r600_shader_ctx *ctx)
10532 {
10533 	r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
10534 return 0;
10535 }
10536
10537 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
10538 {
10539
10540 	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
10541 ctx->bc->cf_last->pop_count = pops;
10542 /* XXX work out offset */
10543 return 0;
10544 }
10545
10546 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
10547 {
10548 return 0;
10549 }
10550
10551 static void emit_testflag(struct r600_shader_ctx *ctx)
10552 {
10553
10554 }
10555
10556 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
10557 {
10558 emit_testflag(ctx);
10559 emit_jump_to_offset(ctx, 1, 4);
10560 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
10561 pops(ctx, ifidx + 1);
10562 emit_return(ctx);
10563 }
10564
10565 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
10566 {
10567 emit_testflag(ctx);
10568
10569 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10570 ctx->bc->cf_last->pop_count = 1;
10571
10572 fc_set_mid(ctx, fc_sp);
10573
10574 pops(ctx, 1);
10575 }
10576 #endif
10577
10578 static int emit_if(struct r600_shader_ctx *ctx, int opcode,
10579 struct r600_bytecode_alu_src *src)
10580 {
10581 int alu_type = CF_OP_ALU_PUSH_BEFORE;
10582 bool needs_workaround = false;
10583 int elems = callstack_push(ctx, FC_PUSH_VPM);
10584
10585 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1)
10586 needs_workaround = true;
10587
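	/* on EG the workaround is also needed when the push would land exactly
	 * on (or just past) a wavefront stack entry boundary */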
10588 if (ctx->bc->chip_class == EVERGREEN && ctx_needs_stack_workaround_8xx(ctx)) {
10589 unsigned dmod1 = (elems - 1) % ctx->bc->stack.entry_size;
10590 unsigned dmod2 = (elems) % ctx->bc->stack.entry_size;
10591
10592 if (elems && (!dmod1 || !dmod2))
10593 needs_workaround = true;
10594 }
10595
10596 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
10597 * LOOP_STARTxxx for nested loops may put the branch stack into a state
10598 	 * such that ALU_PUSH_BEFORE doesn't work as expected. Work around this
10599 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
10600 if (needs_workaround) {
10601 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
10602 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
10603 alu_type = CF_OP_ALU;
10604 }
10605
10606 emit_logic_pred(ctx, opcode, alu_type, src);
10607
10608 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
10609
10610 fc_pushlevel(ctx, FC_IF);
10611
10612 return 0;
10613 }
10614
10615 static int tgsi_if(struct r600_shader_ctx *ctx)
10616 {
10617 struct r600_bytecode_alu_src alu_src;
10618 r600_bytecode_src(&alu_src, &ctx->src[0], 0);
10619
10620 return emit_if(ctx, ALU_OP2_PRED_SETNE, &alu_src);
10621 }
10622
10623 static int tgsi_uif(struct r600_shader_ctx *ctx)
10624 {
10625 struct r600_bytecode_alu_src alu_src;
10626 r600_bytecode_src(&alu_src, &ctx->src[0], 0);
10627 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
10628 }
10629
10630 static int tgsi_else(struct r600_shader_ctx *ctx)
10631 {
10632 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
10633 ctx->bc->cf_last->pop_count = 1;
10634
10635 fc_set_mid(ctx, ctx->bc->fc_sp - 1);
10636 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
10637 return 0;
10638 }
10639
10640 static int tgsi_endif(struct r600_shader_ctx *ctx)
10641 {
10642 int offset = 2;
10643 pops(ctx, 1);
10644 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
10645 R600_ERR("if/endif unbalanced in shader\n");
10646 return -1;
10647 }
10648
10649 /* ALU_EXTENDED needs 4 DWords instead of two, adjust jump target offset accordingly */
10650 if (ctx->bc->cf_last->eg_alu_extended)
10651 offset += 2;
10652
10653 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
10654 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + offset;
10655 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
10656 } else {
10657 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + offset;
10658 }
10659 fc_poplevel(ctx);
10660
10661 callstack_pop(ctx, FC_PUSH_VPM);
10662 return 0;
10663 }
10664
10665 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
10666 {
10667 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
10668 	 * limited to 4096 iterations the way the other LOOP_* instructions are. */
10669 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
10670
10671 fc_pushlevel(ctx, FC_LOOP);
10672
10673 /* check stack depth */
10674 callstack_push(ctx, FC_LOOP);
10675 return 0;
10676 }
10677
10678 static int tgsi_endloop(struct r600_shader_ctx *ctx)
10679 {
10680 int i;
10681
10682 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
10683
10684 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
10685 R600_ERR("loop/endloop in shader code are not paired.\n");
10686 return -EINVAL;
10687 }
10688
10689 /* fixup loop pointers - from r600isa
10690 LOOP END points to CF after LOOP START,
10691 	   LOOP START points to CF after LOOP END
10692 BRK/CONT point to LOOP END CF
10693 */
10694 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
10695
10696 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
10697
10698 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
10699 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
10700 }
10701 /* XXX add LOOPRET support */
10702 fc_poplevel(ctx);
10703 callstack_pop(ctx, FC_LOOP);
10704 return 0;
10705 }
10706
10707 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
10708 {
10709 unsigned int fscp;
10710
10711 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
10712 {
10713 if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
10714 break;
10715 }
10716
10717 if (fscp == 0) {
10718 R600_ERR("Break not inside loop/endloop pair\n");
10719 return -EINVAL;
10720 }
10721
10722 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10723
10724 fc_set_mid(ctx, fscp - 1);
10725
10726 return 0;
10727 }
10728
10729 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
10730 {
10731 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10732 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
10733 int r;
10734
10735 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
10736 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
10737
10738 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10739 if (!r) {
10740 ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
10741 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
10742 return emit_inc_ring_offset(ctx, stream, TRUE);
10743 }
10744 return r;
10745 }
10746
10747 static int tgsi_umad(struct r600_shader_ctx *ctx)
10748 {
10749 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10750 struct r600_bytecode_alu alu;
10751 int i, j, r;
10752 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10753
10754 /* src0 * src1 */
10755 for (i = 0; i < lasti + 1; i++) {
10756 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10757 continue;
10758
10759 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10760
10761 alu.dst.chan = i;
10762 alu.dst.sel = ctx->temp_reg;
10763 alu.dst.write = 1;
10764
10765 alu.op = ALU_OP2_MULLO_UINT;
10766 for (j = 0; j < 2; j++) {
10767 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
10768 }
10769
10770 alu.last = 1;
10771 r = emit_mul_int_op(ctx->bc, &alu);
10772 if (r)
10773 return r;
10774 }
10775
10776
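	/* (src0 * src1) + src2 */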
10777 for (i = 0; i < lasti + 1; i++) {
10778 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10779 continue;
10780
10781 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10782 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10783
10784 alu.op = ALU_OP2_ADD_INT;
10785
10786 alu.src[0].sel = ctx->temp_reg;
10787 alu.src[0].chan = i;
10788
10789 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
10790 if (i == lasti) {
10791 alu.last = 1;
10792 }
10793 r = r600_bytecode_add_alu(ctx->bc, &alu);
10794 if (r)
10795 return r;
10796 }
10797 return 0;
10798 }
10799
10800 static int tgsi_pk2h(struct r600_shader_ctx *ctx)
10801 {
10802 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10803 struct r600_bytecode_alu alu;
10804 int r, i;
10805 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10806
10807 /* temp.xy = f32_to_f16(src) */
10808 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10809 alu.op = ALU_OP1_FLT32_TO_FLT16;
10810 alu.dst.chan = 0;
10811 alu.dst.sel = ctx->temp_reg;
10812 alu.dst.write = 1;
10813 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10814 r = r600_bytecode_add_alu(ctx->bc, &alu);
10815 if (r)
10816 return r;
10817 alu.dst.chan = 1;
10818 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
10819 alu.last = 1;
10820 r = r600_bytecode_add_alu(ctx->bc, &alu);
10821 if (r)
10822 return r;
10823
10824 /* dst.x = temp.y * 0x10000 + temp.x */
10825 for (i = 0; i < lasti + 1; i++) {
10826 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10827 continue;
10828
10829 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10830 alu.op = ALU_OP3_MULADD_UINT24;
10831 alu.is_op3 = 1;
10832 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10833 alu.last = i == lasti;
10834 alu.src[0].sel = ctx->temp_reg;
10835 alu.src[0].chan = 1;
10836 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10837 alu.src[1].value = 0x10000;
10838 alu.src[2].sel = ctx->temp_reg;
10839 alu.src[2].chan = 0;
10840 r = r600_bytecode_add_alu(ctx->bc, &alu);
10841 if (r)
10842 return r;
10843 }
10844
10845 return 0;
10846 }
10847
10848 static int tgsi_up2h(struct r600_shader_ctx *ctx)
10849 {
10850 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10851 struct r600_bytecode_alu alu;
10852 int r, i;
10853 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10854
10855 /* temp.x = src.x */
10856 /* note: no need to mask out the high bits */
10857 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10858 alu.op = ALU_OP1_MOV;
10859 alu.dst.chan = 0;
10860 alu.dst.sel = ctx->temp_reg;
10861 alu.dst.write = 1;
10862 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10863 r = r600_bytecode_add_alu(ctx->bc, &alu);
10864 if (r)
10865 return r;
10866
10867 /* temp.y = src.x >> 16 */
10868 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10869 alu.op = ALU_OP2_LSHR_INT;
10870 alu.dst.chan = 1;
10871 alu.dst.sel = ctx->temp_reg;
10872 alu.dst.write = 1;
10873 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10874 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10875 alu.src[1].value = 16;
10876 alu.last = 1;
10877 r = r600_bytecode_add_alu(ctx->bc, &alu);
10878 if (r)
10879 return r;
10880
10881 /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
10882 for (i = 0; i < lasti + 1; i++) {
10883 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10884 continue;
10885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10886 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10887 alu.op = ALU_OP1_FLT16_TO_FLT32;
10888 alu.src[0].sel = ctx->temp_reg;
10889 alu.src[0].chan = i % 2;
10890 alu.last = i == lasti;
10891 r = r600_bytecode_add_alu(ctx->bc, &alu);
10892 if (r)
10893 return r;
10894 }
10895
10896 return 0;
10897 }
10898
10899 static int tgsi_bfe(struct r600_shader_ctx *ctx)
10900 {
10901 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10902 struct r600_bytecode_alu alu;
10903 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10904 int r, i;
10905 int dst = -1;
10906
10907 if ((inst->Src[0].Register.File == inst->Dst[0].Register.File &&
10908 inst->Src[0].Register.Index == inst->Dst[0].Register.Index) ||
10909 (inst->Src[2].Register.File == inst->Dst[0].Register.File &&
10910 inst->Src[2].Register.Index == inst->Dst[0].Register.Index))
10911 dst = r600_get_temp(ctx);
10912
10913 r = tgsi_op3_dst(ctx, dst);
10914 if (r)
10915 return r;
10916
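	/* temp = (bits >= 32) for each channel up to the last written one */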
10917 for (i = 0; i < lasti + 1; i++) {
10918 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10919 alu.op = ALU_OP2_SETGE_INT;
10920 r600_bytecode_src(&alu.src[0], &ctx->src[2], i);
10921 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10922 alu.src[1].value = 32;
10923 alu.dst.sel = ctx->temp_reg;
10924 alu.dst.chan = i;
10925 alu.dst.write = 1;
10926 if (i == lasti)
10927 alu.last = 1;
10928 r = r600_bytecode_add_alu(ctx->bc, &alu);
10929 if (r)
10930 return r;
10931 }
10932
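	/* the hw op only honours the low 5 bits of the width, so a width of
	 * 32 or more must return src0 unmodified instead of the BFE result */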
10933 for (i = 0; i < lasti + 1; i++) {
10934 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10935 alu.op = ALU_OP3_CNDE_INT;
10936 alu.is_op3 = 1;
10937 alu.src[0].sel = ctx->temp_reg;
10938 alu.src[0].chan = i;
10939
10940 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10941 if (dst != -1)
10942 alu.src[1].sel = dst;
10943 else
10944 alu.src[1].sel = alu.dst.sel;
10945 alu.src[1].chan = i;
10946 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
10947 alu.dst.write = 1;
10948 if (i == lasti)
10949 alu.last = 1;
10950 r = r600_bytecode_add_alu(ctx->bc, &alu);
10951 if (r)
10952 return r;
10953 }
10954
10955 return 0;
10956 }
10957
10958 static int tgsi_clock(struct r600_shader_ctx *ctx)
10959 {
10960 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10961 struct r600_bytecode_alu alu;
10962 int r;
10963
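	/* read the 64-bit shader time counter: TIME_LO into dst.x, TIME_HI
	 * into dst.y */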
10964 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10965 alu.op = ALU_OP1_MOV;
10966 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
10967 alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_LO;
10968 r = r600_bytecode_add_alu(ctx->bc, &alu);
10969 if (r)
10970 return r;
10971 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10972 alu.op = ALU_OP1_MOV;
10973 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
10974 alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_HI;
10975 alu.last = 1;
10976 r = r600_bytecode_add_alu(ctx->bc, &alu);
10977 if (r)
10978 return r;
10979 return 0;
10980 }
10981
10982 static int emit_u64add(struct r600_shader_ctx *ctx, int op,
10983 int treg,
10984 int src0_sel, int src0_chan,
10985 int src1_sel, int src1_chan)
10986 {
10987 struct r600_bytecode_alu alu;
10988 int r;
10989 int opc;
10990
10991 if (op == ALU_OP2_ADD_INT)
10992 opc = ALU_OP2_ADDC_UINT;
10993 else
10994 opc = ALU_OP2_SUBB_UINT;
10995
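	/* treg.x = src0.lo op src1.lo */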
10996 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10997 	alu.op = op;
10998 alu.dst.sel = treg;
10999 alu.dst.chan = 0;
11000 alu.dst.write = 1;
11001 alu.src[0].sel = src0_sel;
11002 alu.src[0].chan = src0_chan + 0;
11003 alu.src[1].sel = src1_sel;
11004 alu.src[1].chan = src1_chan + 0;
11005 alu.src[1].neg = 0;
11006 r = r600_bytecode_add_alu(ctx->bc, &alu);
11007 if (r)
11008 return r;
11009
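	/* treg.y = src0.hi op src1.hi, carry/borrow not yet applied */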
11010 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11011 alu.op = op;
11012 alu.dst.sel = treg;
11013 alu.dst.chan = 1;
11014 alu.dst.write = 1;
11015 alu.src[0].sel = src0_sel;
11016 alu.src[0].chan = src0_chan + 1;
11017 alu.src[1].sel = src1_sel;
11018 alu.src[1].chan = src1_chan + 1;
11019 alu.src[1].neg = 0;
11020 r = r600_bytecode_add_alu(ctx->bc, &alu);
11021 if (r)
11022 return r;
11023
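	/* treg.z = the carry (ADDC) or borrow (SUBB) out of the low dword */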
11024 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11025 alu.op = opc;
11026 alu.dst.sel = treg;
11027 alu.dst.chan = 2;
11028 alu.dst.write = 1;
11029 alu.last = 1;
11030 alu.src[0].sel = src0_sel;
11031 alu.src[0].chan = src0_chan + 0;
11032 alu.src[1].sel = src1_sel;
11033 alu.src[1].chan = src1_chan + 0;
11034 alu.src[1].neg = 0;
11035 r = r600_bytecode_add_alu(ctx->bc, &alu);
11036 if (r)
11037 return r;
11038
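	/* fold the carry/borrow into the high dword: treg.y = treg.y op treg.z */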
11039 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11040 alu.op = op;
11041 alu.dst.sel = treg;
11042 alu.dst.chan = 1;
11043 alu.dst.write = 1;
11044 alu.src[0].sel = treg;
11045 alu.src[0].chan = 1;
11046 alu.src[1].sel = treg;
11047 alu.src[1].chan = 2;
11048 alu.last = 1;
11049 r = r600_bytecode_add_alu(ctx->bc, &alu);
11050 if (r)
11051 return r;
11052 return 0;
11053 }
11054
11055 static int egcm_u64add(struct r600_shader_ctx *ctx)
11056 {
11057 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
11058 struct r600_bytecode_alu alu;
11059 int r;
11060 int treg = ctx->temp_reg;
11061 int op = ALU_OP2_ADD_INT, opc = ALU_OP2_ADDC_UINT;
11062
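	/* a negated second operand turns the 64-bit add into a 64-bit subtract */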
11063 if (ctx->src[1].neg) {
11064 op = ALU_OP2_SUB_INT;
11065 opc = ALU_OP2_SUBB_UINT;
11066 }
11067 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11068 	alu.op = op;
11069 alu.dst.sel = treg;
11070 alu.dst.chan = 0;
11071 alu.dst.write = 1;
11072 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11073 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11074 alu.src[1].neg = 0;
11075 r = r600_bytecode_add_alu(ctx->bc, &alu);
11076 if (r)
11077 return r;
11078
11079 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11080 alu.op = op;
11081 alu.dst.sel = treg;
11082 alu.dst.chan = 1;
11083 alu.dst.write = 1;
11084 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
11085 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
11086 alu.src[1].neg = 0;
11087 r = r600_bytecode_add_alu(ctx->bc, &alu);
11088 if (r)
11089 return r;
11090
11091 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11092 	alu.op = opc;
11093 alu.dst.sel = treg;
11094 alu.dst.chan = 2;
11095 alu.dst.write = 1;
11096 alu.last = 1;
11097 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11098 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11099 alu.src[1].neg = 0;
11100 r = r600_bytecode_add_alu(ctx->bc, &alu);
11101 if (r)
11102 return r;
11103
11104 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11105 alu.op = op;
11106 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
11107 alu.src[0].sel = treg;
11108 alu.src[0].chan = 1;
11109 alu.src[1].sel = treg;
11110 alu.src[1].chan = 2;
11111 alu.last = 1;
11112 r = r600_bytecode_add_alu(ctx->bc, &alu);
11113 if (r)
11114 return r;
11115 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11116 alu.op = ALU_OP1_MOV;
11117 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
11118 alu.src[0].sel = treg;
11119 alu.src[0].chan = 0;
11120 alu.last = 1;
11121 r = r600_bytecode_add_alu(ctx->bc, &alu);
11122 if (r)
11123 return r;
11124 return 0;
11125 }
11126
11127 /* 64-bit multiply, producing the low 64 bits of the result:
11128    result.x = mul_lo(a.x, b.x)
11129    result.y = mul_hi(a.x, b.x) + a.x * b.y + a.y * b.x
11130 */
11131 static int egcm_u64mul(struct r600_shader_ctx *ctx)
11132 {
11133 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
11134 struct r600_bytecode_alu alu;
11135 int r;
11136 int treg = ctx->temp_reg;
11137
11138 /* temp.x = mul_lo a.x, b.x */
11139 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11140 alu.op = ALU_OP2_MULLO_UINT;
11141 alu.dst.sel = treg;
11142 alu.dst.chan = 0;
11143 alu.dst.write = 1;
11144 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11145 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11146 r = emit_mul_int_op(ctx->bc, &alu);
11147 if (r)
11148 return r;
11149
11150 /* temp.y = mul_hi a.x, b.x */
11151 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11152 alu.op = ALU_OP2_MULHI_UINT;
11153 alu.dst.sel = treg;
11154 alu.dst.chan = 1;
11155 alu.dst.write = 1;
11156 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11157 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11158 r = emit_mul_int_op(ctx->bc, &alu);
11159 if (r)
11160 return r;
11161
11162 /* temp.z = mul a.x, b.y */
11163 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11164 alu.op = ALU_OP2_MULLO_UINT;
11165 alu.dst.sel = treg;
11166 alu.dst.chan = 2;
11167 alu.dst.write = 1;
11168 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11169 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
11170 r = emit_mul_int_op(ctx->bc, &alu);
11171 if (r)
11172 return r;
11173
11174 /* temp.w = mul a.y, b.x */
11175 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11176 alu.op = ALU_OP2_MULLO_UINT;
11177 alu.dst.sel = treg;
11178 alu.dst.chan = 3;
11179 alu.dst.write = 1;
11180 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
11181 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11182 r = emit_mul_int_op(ctx->bc, &alu);
11183 if (r)
11184 return r;
11185
11186 /* temp.z = temp.z + temp.w */
11187 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11188 alu.op = ALU_OP2_ADD_INT;
11189 alu.dst.sel = treg;
11190 alu.dst.chan = 2;
11191 alu.dst.write = 1;
11192 alu.src[0].sel = treg;
11193 alu.src[0].chan = 2;
11194 alu.src[1].sel = treg;
11195 alu.src[1].chan = 3;
11196 alu.last = 1;
11197 r = r600_bytecode_add_alu(ctx->bc, &alu);
11198 if (r)
11199 return r;
11200
11201 /* temp.y = temp.y + temp.z */
11202 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11203 alu.op = ALU_OP2_ADD_INT;
11204 alu.dst.sel = treg;
11205 alu.dst.chan = 1;
11206 alu.dst.write = 1;
11207 alu.src[0].sel = treg;
11208 alu.src[0].chan = 1;
11209 alu.src[1].sel = treg;
11210 alu.src[1].chan = 2;
11211 alu.last = 1;
11212 r = r600_bytecode_add_alu(ctx->bc, &alu);
11213 if (r)
11214 return r;
11215
11216 /* dst.x = temp.x */
11217 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11218 alu.op = ALU_OP1_MOV;
11219 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
11220 alu.src[0].sel = treg;
11221 alu.src[0].chan = 0;
11222 r = r600_bytecode_add_alu(ctx->bc, &alu);
11223 if (r)
11224 return r;
11225
11226 /* dst.y = temp.y */
11227 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11228 alu.op = ALU_OP1_MOV;
11229 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
11230 alu.src[0].sel = treg;
11231 alu.src[0].chan = 1;
11232 alu.last = 1;
11233 r = r600_bytecode_add_alu(ctx->bc, &alu);
11234 if (r)
11235 return r;
11236
11237 return 0;
11238 }
11239
11240 static int emit_u64sge(struct r600_shader_ctx *ctx,
11241 int treg,
11242 int src0_sel, int src0_base_chan,
11243 int src1_sel, int src1_base_chan)
11244 {
11245 int r;
11246 /* for 64-bit sge */
11247 	/* result = (src0.y > src1.y) || ((src0.y == src1.y) && (src0.x >= src1.x)) */
11248 r = single_alu_op2(ctx, ALU_OP2_SETGT_UINT,
11249 treg, 1,
11250 src0_sel, src0_base_chan + 1,
11251 src1_sel, src1_base_chan + 1);
11252 if (r)
11253 return r;
11254
11255 r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
11256 treg, 0,
11257 src0_sel, src0_base_chan,
11258 src1_sel, src1_base_chan);
11259 if (r)
11260 return r;
11261
11262 r = single_alu_op2(ctx, ALU_OP2_SETE_INT,
11263 treg, 2,
11264 src0_sel, src0_base_chan + 1,
11265 src1_sel, src1_base_chan + 1);
11266 if (r)
11267 return r;
11268
11269 r = single_alu_op2(ctx, ALU_OP2_AND_INT,
11270 treg, 0,
11271 treg, 0,
11272 treg, 2);
11273 if (r)
11274 return r;
11275
11276 r = single_alu_op2(ctx, ALU_OP2_OR_INT,
11277 treg, 0,
11278 treg, 0,
11279 treg, 1);
11280 if (r)
11281 return r;
11282 return 0;
11283 }
11284
11285 /* this isn't a complete div - it's just enough for the qbo shader to work */
11286 static int egcm_u64div(struct r600_shader_ctx *ctx)
11287 {
11288 struct r600_bytecode_alu alu;
11289 struct r600_bytecode_alu_src alu_num_hi, alu_num_lo, alu_denom_hi, alu_denom_lo, alu_src;
11290 int r, i;
11291 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
11292
11293 	/* make sure we are dividing by a constant with 0 in the high bits */
11294 if (ctx->src[1].sel != V_SQ_ALU_SRC_LITERAL)
11295 return -1;
11296 if (ctx->src[1].value[ctx->src[1].swizzle[1]] != 0)
11297 return -1;
11298 /* make sure we are doing one division */
11299 if (inst->Dst[0].Register.WriteMask != 0x3)
11300 return -1;
11301
11302 	/* emit_if uses ctx->temp_reg, so we can't use it here */
11303 int treg = r600_get_temp(ctx);
11304 int tmp_num = r600_get_temp(ctx);
11305 int sub_tmp = r600_get_temp(ctx);
11306
11307 	/* the temporary quotient lives in tmp_num.zw */
11308 r600_bytecode_src(&alu_num_lo, &ctx->src[0], 0);
11309 r600_bytecode_src(&alu_num_hi, &ctx->src[0], 1);
11310 r600_bytecode_src(&alu_denom_lo, &ctx->src[1], 0);
11311 r600_bytecode_src(&alu_denom_hi, &ctx->src[1], 1);
11312
11313 /* MOV tmp_num.xy, numerator */
11314 r = single_alu_op2(ctx, ALU_OP1_MOV,
11315 tmp_num, 0,
11316 alu_num_lo.sel, alu_num_lo.chan,
11317 0, 0);
11318 if (r)
11319 return r;
11320 r = single_alu_op2(ctx, ALU_OP1_MOV,
11321 tmp_num, 1,
11322 alu_num_hi.sel, alu_num_hi.chan,
11323 0, 0);
11324 if (r)
11325 return r;
11326
11327 r = single_alu_op2(ctx, ALU_OP1_MOV,
11328 tmp_num, 2,
11329 V_SQ_ALU_SRC_LITERAL, 0,
11330 0, 0);
11331 if (r)
11332 return r;
11333
11334 r = single_alu_op2(ctx, ALU_OP1_MOV,
11335 tmp_num, 3,
11336 V_SQ_ALU_SRC_LITERAL, 0,
11337 0, 0);
11338 if (r)
11339 return r;
11340
11341 /* treg 0 is log2_denom */
11342 /* normally this gets the MSB for the denom high value
11343 - however we know this will always be 0 here. */
11344 r = single_alu_op2(ctx,
11345 ALU_OP1_MOV,
11346 treg, 0,
11347 V_SQ_ALU_SRC_LITERAL, 32,
11348 0, 0);
11349 if (r)
11350 return r;
11351
11352 /* normally check demon hi for 0, but we know it is already */
11353 /* t0.z = num_hi >= denom_lo */
11354 r = single_alu_op2(ctx,
11355 ALU_OP2_SETGE_UINT,
11356 treg, 1,
11357 alu_num_hi.sel, alu_num_hi.chan,
11358 V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
11359 if (r)
11360 return r;
11361
11362 memset(&alu_src, 0, sizeof(alu_src));
11363 alu_src.sel = treg;
11364 alu_src.chan = 1;
11365 r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
11366 if (r)
11367 return r;
11368
11369 /* for loops in here */
11370 /* get msb t0.x = msb(src[1].x) first */
11371 int msb_lo = util_last_bit(alu_denom_lo.value);
11372 r = single_alu_op2(ctx, ALU_OP1_MOV,
11373 treg, 0,
11374 V_SQ_ALU_SRC_LITERAL, msb_lo,
11375 0, 0);
11376 if (r)
11377 return r;
11378
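	/* First pass: schoolbook division of the numerator's high word by
	 * denom_lo. tmp_num.w accumulates the high quotient word while
	 * tmp_num.y is left holding the remainder for the second pass.
	 */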
	/* unroll the asm here */
	for (i = 0; i < 31; i++) {
		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
		                   treg, 2,
		                   V_SQ_ALU_SRC_LITERAL, i,
		                   treg, 0);
		if (r)
			return r;

		/* we can do this on the CPU */
		uint32_t denom_lo_shl = alu_denom_lo.value << (31 - i);
		/* t0.y = tmp_num.y >= denom_lo_shl */
		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
		                   treg, 1,
		                   tmp_num, 1,
		                   V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_AND_INT,
		                   treg, 1,
		                   treg, 1,
		                   treg, 2);
		if (r)
			return r;

		memset(&alu_src, 0, sizeof(alu_src));
		alu_src.sel = treg;
		alu_src.chan = 1;
		r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
		                   tmp_num, 1,
		                   tmp_num, 1,
		                   V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_OR_INT,
		                   tmp_num, 3,
		                   tmp_num, 3,
		                   V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
		if (r)
			return r;

		r = tgsi_endif(ctx);
		if (r)
			return r;
	}

	/* log2_denom is always <= 31, so manually peel the last loop
	 * iteration.
	 */
	r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
	                   treg, 1,
	                   tmp_num, 1,
	                   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
	if (r)
		return r;

	memset(&alu_src, 0, sizeof(alu_src));
	alu_src.sel = treg;
	alu_src.chan = 1;
	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
	                   tmp_num, 1,
	                   tmp_num, 1,
	                   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
	                   tmp_num, 3,
	                   tmp_num, 3,
	                   V_SQ_ALU_SRC_LITERAL, 1U);
	if (r)
		return r;
	r = tgsi_endif(ctx);
	if (r)
		return r;

	r = tgsi_endif(ctx);
	if (r)
		return r;

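	/* Second pass: 64-bit shift-and-subtract division. tmp_num.z
	 * accumulates the low quotient word while tmp_num.xy carries the
	 * 64-bit remainder.
	 */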
	/* onto the second loop to unroll */
	for (i = 0; i < 31; i++) {
		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
		                   treg, 1,
		                   V_SQ_ALU_SRC_LITERAL, (63 - (31 - i)),
		                   treg, 0);
		if (r)
			return r;

		uint64_t denom_shl = (uint64_t)alu_denom_lo.value << (31 - i);
		r = single_alu_op2(ctx, ALU_OP1_MOV,
		                   treg, 2,
		                   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
		                   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
		                   treg, 3,
		                   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
		                   0, 0);
		if (r)
			return r;

		r = emit_u64sge(ctx, sub_tmp,
		                tmp_num, 0,
		                treg, 2);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_AND_INT,
		                   treg, 1,
		                   treg, 1,
		                   sub_tmp, 0);
		if (r)
			return r;

		memset(&alu_src, 0, sizeof(alu_src));
		alu_src.sel = treg;
		alu_src.chan = 1;
		r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
		if (r)
			return r;

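		/* 64-bit subtract: remainder -= denom << (31 - i) */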
		r = emit_u64add(ctx, ALU_OP2_SUB_INT,
		                sub_tmp,
		                tmp_num, 0,
		                treg, 2);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
		                   tmp_num, 0,
		                   sub_tmp, 0,
		                   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
		                   tmp_num, 1,
		                   sub_tmp, 1,
		                   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_OR_INT,
		                   tmp_num, 2,
		                   tmp_num, 2,
		                   V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
		if (r)
			return r;

		r = tgsi_endif(ctx);
		if (r)
			return r;
	}

	/* log2_denom is always <= 63, so manually peel the last loop
	 * iteration.
	 */
	uint64_t denom_shl = (uint64_t)alu_denom_lo.value;
	r = single_alu_op2(ctx, ALU_OP1_MOV,
	                   treg, 2,
	                   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
	                   0, 0);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP1_MOV,
	                   treg, 3,
	                   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
	                   0, 0);
	if (r)
		return r;

	r = emit_u64sge(ctx, sub_tmp,
	                tmp_num, 0,
	                treg, 2);
	if (r)
		return r;

	memset(&alu_src, 0, sizeof(alu_src));
	alu_src.sel = sub_tmp;
	alu_src.chan = 0;
	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
	if (r)
		return r;

	r = emit_u64add(ctx, ALU_OP2_SUB_INT,
	                sub_tmp,
	                tmp_num, 0,
	                treg, 2);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
	                   tmp_num, 2,
	                   tmp_num, 2,
	                   V_SQ_ALU_SRC_LITERAL, 1U);
	if (r)
		return r;
	r = tgsi_endif(ctx);
	if (r)
		return r;

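	/* write the 64-bit quotient (tmp_num.zw) out to dst.xy */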
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.src[0].sel = tmp_num;
	alu.src[0].chan = 2;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	alu.src[0].sel = tmp_num;
	alu.src[0].chan = 3;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

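/* 64-bit inequality: two values differ iff either 32-bit word differs,
 * so this computes (a.lo != b.lo) | (a.hi != b.hi).
 */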
static int egcm_u64sne(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r;
	int treg = ctx->temp_reg;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_SETNE_INT;
	alu.dst.sel = treg;
	alu.dst.chan = 0;
	alu.dst.write = 1;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_SETNE_INT;
	alu.dst.sel = treg;
	alu.dst.chan = 1;
	alu.dst.write = 1;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
	r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_OR_INT;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.src[0].sel = treg;
	alu.src[0].chan = 0;
	alu.src[1].sel = treg;
	alu.src[1].chan = 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

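/* Per-chip TGSI dispatch tables, indexed by TGSI opcode. Each entry pairs
 * a hardware opcode with the emit callback that lowers the instruction;
 * the bare numeric indices correspond to opcode slots that no longer
 * exist in TGSI.
 */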
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},

	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},

	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	/* MIN_DX10 returns a non-NaN result if one src is NaN; MIN returns NaN */
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_unsupported},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[81] = { ALU_OP0_NOP, tgsi_unsupported},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

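/* Evergreen variant of the table. Unlike the r600 table above, it has
 * native entries for FMA, the bit-manipulation opcodes, image load/store,
 * the RAT atomics, and the 64-bit float (double) opcodes.
 */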
static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

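/* Cayman variant: largely identical to the evergreen table, except that
 * the transcendental and 32-bit integer multiply opcodes are routed
 * through the cayman_* emit helpers.
 */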
static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};