r600: Correct evaluation of cube array index and face
mesa.git: src/gallium/drivers/r600/r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_sq.h"
24 #include "r600_formats.h"
25 #include "r600_opcodes.h"
26 #include "r600_shader.h"
27 #include "r600d.h"
28
29 #include "sb/sb_public.h"
30
31 #include "pipe/p_shader_tokens.h"
32 #include "tgsi/tgsi_info.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_scan.h"
35 #include "tgsi/tgsi_dump.h"
36 #include "util/u_bitcast.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include <stdio.h>
40 #include <errno.h>
41
42 /* CAYMAN notes
43 Why CAYMAN got loops for lots of instructions is explained here.
44
45 -These 8xx t-slot only ops are implemented in all vector slots.
46 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
47 These 8xx t-slot only opcodes become vector ops, with all four
48 slots expecting the arguments on sources a and b. Result is
49 broadcast to all channels.
50 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
51 These 8xx t-slot only opcodes become vector ops in the z, y, and
52 x slots.
53 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
54 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
55 SQRT_IEEE/_64
56 SIN/COS
57 The w slot may have an independent co-issued operation, or if the
58 result is required to be in the w slot, the opcode above may be
59 issued in the w slot as well.
60 The compiler must issue the source argument to slots z, y, and x
61 */
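/* For illustration: the CAYMAN lowering pattern for a t-slot-only op.
 * The same sources are fed to every vector slot and only the slot that
 * matches the wanted destination channel writes back; see the CAYMAN
 * branch of single_alu_op2() below for the real in-tree instance.
 * (op, src0, src1, dst_sel, dst_chan are placeholders.) */
#if 0
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = op;                      /* e.g. ALU_OP2_MULLO_INT */
		alu.src[0] = src0;                /* same operands in each slot */
		alu.src[1] = src1;
		alu.dst.sel = dst_sel;
		alu.dst.chan = i;
		alu.dst.write = (i == dst_chan);  /* keep a single channel */
		alu.last = (i == 3);              /* close the ALU group */
		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;
	}
#endif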
62
63 /* Contents of r0 on entry to various shaders
64
65 VS - .x = VertexID
66 .y = RelVertexID (??)
67 .w = InstanceID
68
69 GS - r0.xyw, r1.xyz = per-vertex offsets
70 r0.z = PrimitiveID
71
72 TCS - .x = PatchID
73 .y = RelPatchID (??)
74 .z = InvocationID
75 .w = tess factor base.
76
77 TES - .x = TessCoord.x
78 - .y = TessCoord.y
79 - .z = RelPatchID (??)
80 - .w = PrimitiveID
81
82 PS - face_gpr.z = SampleMask
83 face_gpr.w = SampleID
84 */
85 #define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
86 static int r600_shader_from_tgsi(struct r600_context *rctx,
87 struct r600_pipe_shader *pipeshader,
88 union r600_shader_key key);
89
90 static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
91 int size, unsigned comp_mask) {
92
93 if (!size)
94 return;
95
96 if (ps->num_arrays == ps->max_arrays) {
97 ps->max_arrays += 64;
98 ps->arrays = realloc(ps->arrays, ps->max_arrays *
99 sizeof(struct r600_shader_array));
100 }
101
102 int n = ps->num_arrays;
103 ++ps->num_arrays;
104
105 ps->arrays[n].comp_mask = comp_mask;
106 ps->arrays[n].gpr_start = start_gpr;
107 ps->arrays[n].gpr_count = size;
108 }
109
110 static void r600_dump_streamout(struct pipe_stream_output_info *so)
111 {
112 unsigned i;
113
114 fprintf(stderr, "STREAMOUT\n");
115 for (i = 0; i < so->num_outputs; i++) {
116 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
117 so->output[i].start_component;
118 fprintf(stderr, " %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
119 i,
120 so->output[i].stream,
121 so->output[i].output_buffer,
122 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
123 so->output[i].register_index,
124 mask & 1 ? "x" : "",
125 mask & 2 ? "y" : "",
126 mask & 4 ? "z" : "",
127 mask & 8 ? "w" : "",
128 so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
129 }
130 }
131
132 static int store_shader(struct pipe_context *ctx,
133 struct r600_pipe_shader *shader)
134 {
135 struct r600_context *rctx = (struct r600_context *)ctx;
136 uint32_t *ptr, i;
137
138 if (shader->bo == NULL) {
139 shader->bo = (struct r600_resource*)
140 pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
141 if (shader->bo == NULL) {
142 return -ENOMEM;
143 }
144 ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
145 if (R600_BIG_ENDIAN) {
146 for (i = 0; i < shader->shader.bc.ndw; ++i) {
147 ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
148 }
149 } else {
150 memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
151 }
152 rctx->b.ws->buffer_unmap(shader->bo->buf);
153 }
154
155 return 0;
156 }
157
158 int r600_pipe_shader_create(struct pipe_context *ctx,
159 struct r600_pipe_shader *shader,
160 union r600_shader_key key)
161 {
162 struct r600_context *rctx = (struct r600_context *)ctx;
163 struct r600_pipe_shader_selector *sel = shader->selector;
164 int r;
165 bool dump = r600_can_dump_shader(&rctx->screen->b,
166 tgsi_get_processor_type(sel->tokens));
167 unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
168 unsigned sb_disasm;
169 unsigned export_shader;
170
171 shader->shader.bc.isa = rctx->isa;
172
173 if (dump) {
174 fprintf(stderr, "--------------------------------------------------------------\n");
175 tgsi_dump(sel->tokens, 0);
176
177 if (sel->so.num_outputs) {
178 r600_dump_streamout(&sel->so);
179 }
180 }
181 r = r600_shader_from_tgsi(rctx, shader, key);
182 if (r) {
183 R600_ERR("translation from TGSI failed !\n");
184 goto error;
185 }
186 if (shader->shader.processor_type == PIPE_SHADER_VERTEX) {
187 /* only disable for vertex shaders in tess paths */
188 if (key.vs.as_ls)
189 use_sb = 0;
190 }
191 use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
192 use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);
193 use_sb &= (shader->shader.processor_type != PIPE_SHADER_COMPUTE);
194
195 /* disable SB for shaders using doubles */
196 use_sb &= !shader->shader.uses_doubles;
197
198 use_sb &= !shader->shader.uses_atomics;
199 use_sb &= !shader->shader.uses_images;
200 use_sb &= !shader->shader.uses_helper_invocation;
201
202 /* Check if the bytecode has already been built. */
203 if (!shader->shader.bc.bytecode) {
204 r = r600_bytecode_build(&shader->shader.bc);
205 if (r) {
206 R600_ERR("building bytecode failed !\n");
207 goto error;
208 }
209 }
210
211 sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
212 if (dump && !sb_disasm) {
213 fprintf(stderr, "--------------------------------------------------------------\n");
214 r600_bytecode_disasm(&shader->shader.bc);
215 fprintf(stderr, "______________________________________________________________\n");
216 } else if ((dump && sb_disasm) || use_sb) {
217 r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
218 dump, use_sb);
219 if (r) {
220 R600_ERR("r600_sb_bytecode_process failed !\n");
221 goto error;
222 }
223 }
224
225 if (shader->gs_copy_shader) {
226 if (dump) {
227 // dump copy shader
228 r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
229 &shader->gs_copy_shader->shader, dump, 0);
230 if (r)
231 goto error;
232 }
233
234 if ((r = store_shader(ctx, shader->gs_copy_shader)))
235 goto error;
236 }
237
238 /* Store the shader in a buffer. */
239 if ((r = store_shader(ctx, shader)))
240 goto error;
241
242 /* Build state. */
243 switch (shader->shader.processor_type) {
244 case PIPE_SHADER_TESS_CTRL:
245 evergreen_update_hs_state(ctx, shader);
246 break;
247 case PIPE_SHADER_TESS_EVAL:
248 if (key.tes.as_es)
249 evergreen_update_es_state(ctx, shader);
250 else
251 evergreen_update_vs_state(ctx, shader);
252 break;
253 case PIPE_SHADER_GEOMETRY:
254 if (rctx->b.chip_class >= EVERGREEN) {
255 evergreen_update_gs_state(ctx, shader);
256 evergreen_update_vs_state(ctx, shader->gs_copy_shader);
257 } else {
258 r600_update_gs_state(ctx, shader);
259 r600_update_vs_state(ctx, shader->gs_copy_shader);
260 }
261 break;
262 case PIPE_SHADER_VERTEX:
263 export_shader = key.vs.as_es;
264 if (rctx->b.chip_class >= EVERGREEN) {
265 if (key.vs.as_ls)
266 evergreen_update_ls_state(ctx, shader);
267 else if (key.vs.as_es)
268 evergreen_update_es_state(ctx, shader);
269 else
270 evergreen_update_vs_state(ctx, shader);
271 } else {
272 if (export_shader)
273 r600_update_es_state(ctx, shader);
274 else
275 r600_update_vs_state(ctx, shader);
276 }
277 break;
278 case PIPE_SHADER_FRAGMENT:
279 if (rctx->b.chip_class >= EVERGREEN) {
280 evergreen_update_ps_state(ctx, shader);
281 } else {
282 r600_update_ps_state(ctx, shader);
283 }
284 break;
285 case PIPE_SHADER_COMPUTE:
286 evergreen_update_ls_state(ctx, shader);
287 break;
288 default:
289 r = -EINVAL;
290 goto error;
291 }
292 return 0;
293
294 error:
295 r600_pipe_shader_destroy(ctx, shader);
296 return r;
297 }
298
299 void r600_pipe_shader_destroy(struct pipe_context *ctx UNUSED, struct r600_pipe_shader *shader)
300 {
301 r600_resource_reference(&shader->bo, NULL);
302 r600_bytecode_clear(&shader->shader.bc);
303 r600_release_command_buffer(&shader->command_buffer);
304 }
305
306 /*
307 * tgsi -> r600 shader
308 */
309 struct r600_shader_tgsi_instruction;
310
311 struct r600_shader_src {
312 unsigned sel;
313 unsigned swizzle[4];
314 unsigned neg;
315 unsigned abs;
316 unsigned rel;
317 unsigned kc_bank;
318 boolean kc_rel; /* true if cache bank is indexed */
319 uint32_t value[4];
320 };
321
322 struct eg_interp {
323 boolean enabled;
324 unsigned ij_index;
325 };
326
327 struct r600_shader_ctx {
328 struct tgsi_shader_info info;
329 struct tgsi_array_info *array_infos;
330 /* flag for each tgsi temp array if its been spilled or not */
331 bool *spilled_arrays;
332 struct tgsi_parse_context parse;
333 const struct tgsi_token *tokens;
334 unsigned type;
335 unsigned file_offset[TGSI_FILE_COUNT];
336 unsigned temp_reg;
337 const struct r600_shader_tgsi_instruction *inst_info;
338 struct r600_bytecode *bc;
339 struct r600_shader *shader;
340 struct r600_shader_src src[4];
341 uint32_t *literals;
342 uint32_t nliterals;
343 uint32_t max_driver_temp_used;
344 /* needed for evergreen interpolation */
345 struct eg_interp eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
346 /* evergreen/cayman also store sample mask in face register */
347 int face_gpr;
348 /* sample id is .w component stored in fixed point position register */
349 int fixed_pt_position_gpr;
350 int colors_used;
351 boolean clip_vertex_write;
352 unsigned cv_output;
353 unsigned edgeflag_output;
354 int helper_invoc_reg;
355 int cs_block_size_reg;
356 int cs_grid_size_reg;
357 bool cs_block_size_loaded, cs_grid_size_loaded;
358 int fragcoord_input;
359 int next_ring_offset;
360 int gs_out_ring_offset;
361 int gs_next_vertex;
362 struct r600_shader *gs_for_vs;
363 int gs_export_gpr_tregs[4];
364 int gs_rotated_input[2];
365 const struct pipe_stream_output_info *gs_stream_output_info;
366 unsigned enabled_stream_buffers_mask;
367 unsigned tess_input_info; /* temp with tess input offsets */
368 unsigned tess_output_info; /* temp with tess output offsets */
369 unsigned thread_id_gpr; /* temp with thread id calculated for images */
370 };
371
372 struct r600_shader_tgsi_instruction {
373 unsigned op;
374 int (*process)(struct r600_shader_ctx *ctx);
375 };
376
377 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
378 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
379 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
380 static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
381 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
382 static int tgsi_else(struct r600_shader_ctx *ctx);
383 static int tgsi_endif(struct r600_shader_ctx *ctx);
384 static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
385 static int tgsi_endloop(struct r600_shader_ctx *ctx);
386 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
387 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
388 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
389 unsigned int dst_reg);
390 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
391 const struct r600_shader_src *shader_src,
392 unsigned chan);
393 static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
394 unsigned dst_reg, unsigned mask);
395
396 static bool ctx_needs_stack_workaround_8xx(struct r600_shader_ctx *ctx)
397 {
398 if (ctx->bc->family == CHIP_HEMLOCK ||
399 ctx->bc->family == CHIP_CYPRESS ||
400 ctx->bc->family == CHIP_JUNIPER)
401 return false;
402 return true;
403 }
404
405 static int tgsi_last_instruction(unsigned writemask)
406 {
407 int i, lasti = 0;
408
409 for (i = 0; i < 4; i++) {
410 if (writemask & (1 << i)) {
411 lasti = i;
412 }
413 }
414 return lasti;
415 }
416
417 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
418 {
419 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
420 unsigned j;
421
422 if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
423 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
424 return -EINVAL;
425 }
426 #if 0
427 if (i->Instruction.Label) {
428 R600_ERR("label unsupported\n");
429 return -EINVAL;
430 }
431 #endif
432 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
433 if (i->Src[j].Register.Dimension) {
434 switch (i->Src[j].Register.File) {
435 case TGSI_FILE_CONSTANT:
436 case TGSI_FILE_HW_ATOMIC:
437 break;
438 case TGSI_FILE_INPUT:
439 if (ctx->type == PIPE_SHADER_GEOMETRY ||
440 ctx->type == PIPE_SHADER_TESS_CTRL ||
441 ctx->type == PIPE_SHADER_TESS_EVAL)
442 break; /* GS/TCS/TES only; otherwise fall through */
443 case TGSI_FILE_OUTPUT:
444 if (ctx->type == PIPE_SHADER_TESS_CTRL)
445 break; /* TCS only; otherwise fall through */
446 default:
447 R600_ERR("unsupported src %d (file %d, dimension %d)\n", j,
448 i->Src[j].Register.File,
449 i->Src[j].Register.Dimension);
450 return -EINVAL;
451 }
452 }
453 }
454 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
455 if (i->Dst[j].Register.Dimension) {
456 if (ctx->type == PIPE_SHADER_TESS_CTRL)
457 continue;
458 R600_ERR("unsupported dst (dimension)\n");
459 return -EINVAL;
460 }
461 }
462 return 0;
463 }
464
465 int eg_get_interpolator_index(unsigned interpolate, unsigned location)
466 {
467 if (interpolate == TGSI_INTERPOLATE_COLOR ||
468 interpolate == TGSI_INTERPOLATE_LINEAR ||
469 interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
470 {
471 int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
472 int loc;
473
474 switch(location) {
475 case TGSI_INTERPOLATE_LOC_CENTER:
476 loc = 1;
477 break;
478 case TGSI_INTERPOLATE_LOC_CENTROID:
479 loc = 2;
480 break;
481 case TGSI_INTERPOLATE_LOC_SAMPLE:
482 default:
483 loc = 0; break;
484 }
485
486 return is_linear * 3 + loc;
487 }
488
489 return -1;
490 }
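/* Worked examples of the mapping above:
 *   PERSPECTIVE + LOC_SAMPLE   -> 0*3 + 0 = 0
 *   PERSPECTIVE + LOC_CENTROID -> 0*3 + 2 = 2
 *   LINEAR      + LOC_CENTER   -> 1*3 + 1 = 4
 * COLOR interpolation is grouped with PERSPECTIVE here (is_linear == 0). */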
491
492 static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
493 int input)
494 {
495 int i = eg_get_interpolator_index(
496 ctx->shader->input[input].interpolate,
497 ctx->shader->input[input].interpolate_location);
498 assert(i >= 0);
499 ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
500 }
501
502 static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
503 {
504 int i, r;
505 struct r600_bytecode_alu alu;
506 int gpr = 0, base_chan = 0;
507 int ij_index = ctx->shader->input[input].ij_index;
508
509 /* work out gpr and base_chan from index */
510 gpr = ij_index / 2;
511 base_chan = (2 * (ij_index % 2)) + 1;
512
513 for (i = 0; i < 8; i++) {
514 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
515
516 if (i < 4)
517 alu.op = ALU_OP2_INTERP_ZW;
518 else
519 alu.op = ALU_OP2_INTERP_XY;
520
521 if ((i > 1) && (i < 6)) {
522 alu.dst.sel = ctx->shader->input[input].gpr;
523 alu.dst.write = 1;
524 }
525
526 alu.dst.chan = i % 4;
527
528 alu.src[0].sel = gpr;
529 alu.src[0].chan = (base_chan - (i % 2));
530
531 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
532
533 alu.bank_swizzle_force = SQ_ALU_VEC_210;
534 if ((i % 4) == 3)
535 alu.last = 1;
536 r = r600_bytecode_add_alu(ctx->bc, &alu);
537 if (r)
538 return r;
539 }
540 return 0;
541 }
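/* Worked example for the index math above: ij_index 3 gives gpr 1 and
 * base_chan 3, so the interpolation reads its i/j pair from chans 3 and 2
 * of GPR 1 (src chan alternates base_chan, base_chan - 1 across the group),
 * with the ZW slots issued before the XY slots. */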
542
543 static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
544 {
545 int i, r;
546 struct r600_bytecode_alu alu;
547
548 for (i = 0; i < 4; i++) {
549 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
550
551 alu.op = ALU_OP1_INTERP_LOAD_P0;
552
553 alu.dst.sel = ctx->shader->input[input].gpr;
554 alu.dst.write = 1;
555
556 alu.dst.chan = i;
557
558 alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
559 alu.src[0].chan = i;
560
561 if (i == 3)
562 alu.last = 1;
563 r = r600_bytecode_add_alu(ctx->bc, &alu);
564 if (r)
565 return r;
566 }
567 return 0;
568 }
569
570 /*
571 * Special export handling in shaders
572 *
573 * shader export ARRAY_BASE for EXPORT_POS:
574 * 60 is position
575 * 61 is misc vector
576 * 62, 63 are clip distance vectors
577 *
578 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
579 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
580 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
581 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
582 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
583 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
584 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
585 * exclusive from render target index)
586 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
587 *
588 *
589 * shader export ARRAY_BASE for EXPORT_PIXEL:
590 * 0-7 CB targets
591 * 61 computed Z vector
592 *
593 * The use of the values exported in the computed Z vector are controlled
594 * by DB_SHADER_CONTROL:
595 * Z_EXPORT_ENABLE - Z as a float in RED
596 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
597 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
598 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
599 * DB_SOURCE_FORMAT - export control restrictions
600 *
601 */
602
603
604 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
605 static int r600_spi_sid(struct r600_shader_io * io)
606 {
607 int index, name = io->name;
608
609 /* These params are handled differently, they don't need
610 * semantic indices, so we'll use 0 for them.
611 */
612 if (name == TGSI_SEMANTIC_POSITION ||
613 name == TGSI_SEMANTIC_PSIZE ||
614 name == TGSI_SEMANTIC_EDGEFLAG ||
615 name == TGSI_SEMANTIC_FACE ||
616 name == TGSI_SEMANTIC_SAMPLEMASK)
617 index = 0;
618 else {
619 if (name == TGSI_SEMANTIC_GENERIC) {
620 /* For generic params simply use sid from tgsi */
621 index = io->sid;
622 } else {
623 /* For non-generic params - pack name and sid into 8 bits */
624 index = 0x80 | (name<<3) | (io->sid);
625 }
626
627 /* Make sure that all really used indices have nonzero value, so
628 * we can just compare it to 0 later instead of comparing the name
629 * with different values to detect special cases. */
630 index++;
631 }
632
633 return index;
634 };
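/* Worked examples of the packing above:
 *   TGSI_SEMANTIC_GENERIC, sid = 5 -> index = 5 + 1 = 6
 *   non-generic name n, sid = s    -> index = (0x80 | (n << 3) | s) + 1
 * POSITION/PSIZE/EDGEFLAG/FACE/SAMPLEMASK always map to 0. */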
635
636 /* we need this to get a common lds index for vs/tcs/tes input/outputs */
637 int r600_get_lds_unique_index(unsigned semantic_name, unsigned index)
638 {
639 switch (semantic_name) {
640 case TGSI_SEMANTIC_POSITION:
641 return 0;
642 case TGSI_SEMANTIC_PSIZE:
643 return 1;
644 case TGSI_SEMANTIC_CLIPDIST:
645 assert(index <= 1);
646 return 2 + index;
647 case TGSI_SEMANTIC_GENERIC:
648 if (index <= 63-4)
649 return 4 + index - 9;
650 else
651 /* same explanation as in the default statement,
652 * the only user hitting this is st/nine.
653 */
654 return 0;
655
656 /* patch indices are completely separate and thus start from 0 */
657 case TGSI_SEMANTIC_TESSOUTER:
658 return 0;
659 case TGSI_SEMANTIC_TESSINNER:
660 return 1;
661 case TGSI_SEMANTIC_PATCH:
662 return 2 + index;
663
664 default:
665 /* Don't fail here. The result of this function is only used
666 * for LS, TCS, TES, and GS, where legacy GL semantics can't
667 * occur, but this function is called for all vertex shaders
668 * before it's known whether LS will be compiled or not.
669 */
670 return 0;
671 }
672 }
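/* Worked examples: POSITION -> 0, PSIZE -> 1, CLIPDIST[1] -> 2 + 1 = 3,
 * GENERIC with sid 9 -> 4 + 9 - 9 = 4; the patch semantics use their own
 * namespace starting at 0 (TESSOUTER -> 0, TESSINNER -> 1, PATCH[i] -> 2+i). */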
673
674 /* turn input into interpolate on EG */
675 static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
676 {
677 int r = 0;
678
679 if (ctx->shader->input[index].spi_sid) {
680 ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
681 if (ctx->shader->input[index].interpolate > 0) {
682 evergreen_interp_assign_ij_index(ctx, index);
683 r = evergreen_interp_alu(ctx, index);
684 } else {
685 r = evergreen_interp_flat(ctx, index);
686 }
687 }
688 return r;
689 }
690
691 static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
692 {
693 struct r600_bytecode_alu alu;
694 int i, r;
695 int gpr_front = ctx->shader->input[front].gpr;
696 int gpr_back = ctx->shader->input[back].gpr;
697
698 for (i = 0; i < 4; i++) {
699 memset(&alu, 0, sizeof(alu));
700 alu.op = ALU_OP3_CNDGT;
701 alu.is_op3 = 1;
702 alu.dst.write = 1;
703 alu.dst.sel = gpr_front;
704 alu.src[0].sel = ctx->face_gpr;
705 alu.src[1].sel = gpr_front;
706 alu.src[2].sel = gpr_back;
707
708 alu.dst.chan = i;
709 alu.src[1].chan = i;
710 alu.src[2].chan = i;
711 alu.last = (i==3);
712
713 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
714 return r;
715 }
716
717 return 0;
718 }
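/* Note: CNDGT computes dst = (src0 > 0.0f) ? src1 : src2 per channel, so
 * with the face GPR (positive for front-facing pixels) in src0 this selects
 * the front color for front faces and the back color otherwise, writing the
 * result over the front-color GPR. */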
719
720 /* execute a single slot ALU calculation */
721 static int single_alu_op2(struct r600_shader_ctx *ctx, int op,
722 int dst_sel, int dst_chan,
723 int src0_sel, unsigned src0_chan_val,
724 int src1_sel, unsigned src1_chan_val)
725 {
726 struct r600_bytecode_alu alu;
727 int r, i;
728
729 if (ctx->bc->chip_class == CAYMAN && op == ALU_OP2_MULLO_INT) {
730 for (i = 0; i < 4; i++) {
731 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
732 alu.op = op;
733 alu.src[0].sel = src0_sel;
734 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
735 alu.src[0].value = src0_chan_val;
736 else
737 alu.src[0].chan = src0_chan_val;
738 alu.src[1].sel = src1_sel;
739 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
740 alu.src[1].value = src1_chan_val;
741 else
742 alu.src[1].chan = src1_chan_val;
743 alu.dst.sel = dst_sel;
744 alu.dst.chan = i;
745 alu.dst.write = i == dst_chan;
746 alu.last = (i == 3);
747 r = r600_bytecode_add_alu(ctx->bc, &alu);
748 if (r)
749 return r;
750 }
751 return 0;
752 }
753
754 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
755 alu.op = op;
756 alu.src[0].sel = src0_sel;
757 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
758 alu.src[0].value = src0_chan_val;
759 else
760 alu.src[0].chan = src0_chan_val;
761 alu.src[1].sel = src1_sel;
762 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
763 alu.src[1].value = src1_chan_val;
764 else
765 alu.src[1].chan = src1_chan_val;
766 alu.dst.sel = dst_sel;
767 alu.dst.chan = dst_chan;
768 alu.dst.write = 1;
769 alu.last = 1;
770 r = r600_bytecode_add_alu(ctx->bc, &alu);
771 if (r)
772 return r;
773 return 0;
774 }
775
776 /* execute a single slot ALU calculation */
777 static int single_alu_op3(struct r600_shader_ctx *ctx, int op,
778 int dst_sel, int dst_chan,
779 int src0_sel, unsigned src0_chan_val,
780 int src1_sel, unsigned src1_chan_val,
781 int src2_sel, unsigned src2_chan_val)
782 {
783 struct r600_bytecode_alu alu;
784 int r;
785
786 /* validate this for other ops */
787 assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT || op == ALU_OP3_BFE_UINT);
788 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
789 alu.op = op;
790 alu.src[0].sel = src0_sel;
791 if (src0_sel == V_SQ_ALU_SRC_LITERAL)
792 alu.src[0].value = src0_chan_val;
793 else
794 alu.src[0].chan = src0_chan_val;
795 alu.src[1].sel = src1_sel;
796 if (src1_sel == V_SQ_ALU_SRC_LITERAL)
797 alu.src[1].value = src1_chan_val;
798 else
799 alu.src[1].chan = src1_chan_val;
800 alu.src[2].sel = src2_sel;
801 if (src2_sel == V_SQ_ALU_SRC_LITERAL)
802 alu.src[2].value = src2_chan_val;
803 else
804 alu.src[2].chan = src2_chan_val;
805 alu.dst.sel = dst_sel;
806 alu.dst.chan = dst_chan;
807 alu.is_op3 = 1;
808 alu.last = 1;
809 r = r600_bytecode_add_alu(ctx->bc, &alu);
810 if (r)
811 return r;
812 return 0;
813 }
814
815 /* put it in temp_reg.x */
816 static int get_lds_offset0(struct r600_shader_ctx *ctx,
817 int rel_patch_chan,
818 int temp_reg, bool is_patch_var)
819 {
820 int r;
821
822 /* MUL temp.x, patch_stride (input_vals.x), rel_patch_id (r0.y (tcs)) */
823 /* ADD
824 Dimension - patch0_offset (input_vals.z),
825 Non-dim - patch0_data_offset (input_vals.w)
826 */
827 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
828 temp_reg, 0,
829 ctx->tess_output_info, 0,
830 0, rel_patch_chan,
831 ctx->tess_output_info, is_patch_var ? 3 : 2);
832 if (r)
833 return r;
834 return 0;
835 }
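/* The single MULADD above therefore computes
 *   temp.x = input_vals.x * rel_patch_id
 *            + (is_patch_var ? input_vals.w : input_vals.z)
 * i.e. patch_stride * rel_patch_id plus the per-patch base offset. */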
836
837 static inline int get_address_file_reg(struct r600_shader_ctx *ctx, int index)
838 {
839 return index > 0 ? ctx->bc->index_reg[index - 1] : ctx->bc->ar_reg;
840 }
841
842 static int r600_get_temp(struct r600_shader_ctx *ctx)
843 {
844 return ctx->temp_reg + ctx->max_driver_temp_used++;
845 }
846
847 static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
848 {
849 int i;
850 i = ctx->shader->noutput++;
851 ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
852 ctx->shader->output[i].sid = 0;
853 ctx->shader->output[i].gpr = 0;
854 ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
855 ctx->shader->output[i].write_mask = 0x4;
856 ctx->shader->output[i].spi_sid = prim_id_sid;
857
858 return 0;
859 }
860
861 static int tgsi_barrier(struct r600_shader_ctx *ctx)
862 {
863 struct r600_bytecode_alu alu;
864 int r;
865
866 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
867 alu.op = ctx->inst_info->op;
868 alu.last = 1;
869
870 r = r600_bytecode_add_alu(ctx->bc, &alu);
871 if (r)
872 return r;
873 return 0;
874 }
875
876 static void choose_spill_arrays(struct r600_shader_ctx *ctx, int *regno, unsigned *scratch_space_needed)
877 {
878 // pick largest array and spill it, repeat until the number of temps is under limit or we run out of arrays
879 unsigned n = ctx->info.array_max[TGSI_FILE_TEMPORARY];
880 unsigned narrays_left = n;
881 bool *spilled = ctx->spilled_arrays; // assumed calloc:ed
882
883 *scratch_space_needed = 0;
884 while (*regno > 124 && narrays_left) {
885 unsigned i;
886 unsigned largest = 0;
887 unsigned largest_index = 0;
888
889 for (i = 0; i < n; i++) {
890 unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
891 if (!spilled[i] && size > largest) {
892 largest = size;
893 largest_index = i;
894 }
895 }
896
897 spilled[largest_index] = true;
898 *regno -= largest;
899 *scratch_space_needed += largest;
900
901 narrays_left --;
902 }
903
904 if (narrays_left == 0) {
905 ctx->info.indirect_files &= ~(1 << TGSI_FILE_TEMPORARY);
906 }
907 }
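/* Worked example: with *regno == 130 and temp arrays of size 16 and 8, the
 * size-16 array is spilled first, leaving *regno == 114 and
 * *scratch_space_needed == 16; since 114 <= 124 the loop then stops and the
 * size-8 array stays in GPRs. */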
908
909 /* Take spilled temp arrays into account when translating tgsi register
910 * indexes into r600 gprs if spilled is false, or scratch array offset if
911 * spilled is true */
912 static int map_tgsi_reg_index_to_r600_gpr(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index, bool *spilled)
913 {
914 unsigned i;
915 unsigned spilled_size = 0;
916
917 for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
918 if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
919 if (ctx->spilled_arrays[i]) {
920 /* vec4 index into spilled scratch memory */
921 *spilled = true;
922 return tgsi_reg_index - ctx->array_infos[i].range.First + spilled_size;
923 }
924 else {
925 /* regular GPR array */
926 *spilled = false;
927 return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
928 }
929 }
930
931 if (tgsi_reg_index < ctx->array_infos[i].range.First)
932 break;
933 if (ctx->spilled_arrays[i]) {
934 spilled_size += ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
935 }
936 }
937
938 /* regular GPR index, minus the holes from spilled arrays */
939 *spilled = false;
940
941 return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
942 }
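/* Worked example: with one declared array covering temps 10..19 that has
 * been spilled, tgsi temp 12 maps to scratch vec4 2 (*spilled == true),
 * while tgsi temp 25 maps to GPR 25 - 10 + file_offset[TGSI_FILE_TEMPORARY]
 * because the ten spilled registers no longer occupy GPR space
 * (*spilled == false). */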
943
944 /* look up spill area base offset and array size for a spilled temp array */
945 static void get_spilled_array_base_and_size(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index,
946 unsigned *array_base, unsigned *array_size)
947 {
948 unsigned i;
949 unsigned offset = 0;
950
951 for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
952 if (ctx->spilled_arrays[i]) {
953 unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
954
955 if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
956 *array_base = offset;
957 *array_size = size - 1; /* hw counts from 1 */
958
959 return;
960 }
961
962 offset += size;
963 }
964 }
965 }
966
967 static int tgsi_declaration(struct r600_shader_ctx *ctx)
968 {
969 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
970 int r, i, j, count = d->Range.Last - d->Range.First + 1;
971
972 switch (d->Declaration.File) {
973 case TGSI_FILE_INPUT:
974 for (j = 0; j < count; j++) {
975 i = ctx->shader->ninput + j;
976 assert(i < ARRAY_SIZE(ctx->shader->input));
977 ctx->shader->input[i].name = d->Semantic.Name;
978 ctx->shader->input[i].sid = d->Semantic.Index + j;
979 ctx->shader->input[i].interpolate = d->Interp.Interpolate;
980 ctx->shader->input[i].interpolate_location = d->Interp.Location;
981 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
982 if (ctx->type == PIPE_SHADER_FRAGMENT) {
983 ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
984 switch (ctx->shader->input[i].name) {
985 case TGSI_SEMANTIC_FACE:
986 if (ctx->face_gpr != -1)
987 ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
988 else
989 ctx->face_gpr = ctx->shader->input[i].gpr;
990 break;
991 case TGSI_SEMANTIC_COLOR:
992 ctx->colors_used++;
993 break;
994 case TGSI_SEMANTIC_POSITION:
995 ctx->fragcoord_input = i;
996 break;
997 case TGSI_SEMANTIC_PRIMID:
998 /* set this for now */
999 ctx->shader->gs_prim_id_input = true;
1000 ctx->shader->ps_prim_id_input = i;
1001 break;
1002 }
1003 if (ctx->bc->chip_class >= EVERGREEN) {
1004 if ((r = evergreen_interp_input(ctx, i)))
1005 return r;
1006 }
1007 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
1008 /* FIXME probably skip inputs if they aren't passed in the ring */
1009 ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
1010 ctx->next_ring_offset += 16;
1011 if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
1012 ctx->shader->gs_prim_id_input = true;
1013 }
1014 }
1015 ctx->shader->ninput += count;
1016 break;
1017 case TGSI_FILE_OUTPUT:
1018 for (j = 0; j < count; j++) {
1019 i = ctx->shader->noutput + j;
1020 assert(i < ARRAY_SIZE(ctx->shader->output));
1021 ctx->shader->output[i].name = d->Semantic.Name;
1022 ctx->shader->output[i].sid = d->Semantic.Index + j;
1023 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
1024 ctx->shader->output[i].interpolate = d->Interp.Interpolate;
1025 ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
1026 if (ctx->type == PIPE_SHADER_VERTEX ||
1027 ctx->type == PIPE_SHADER_GEOMETRY ||
1028 ctx->type == PIPE_SHADER_TESS_EVAL) {
1029 ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
1030 switch (d->Semantic.Name) {
1031 case TGSI_SEMANTIC_CLIPDIST:
1032 break;
1033 case TGSI_SEMANTIC_PSIZE:
1034 ctx->shader->vs_out_misc_write = 1;
1035 ctx->shader->vs_out_point_size = 1;
1036 break;
1037 case TGSI_SEMANTIC_EDGEFLAG:
1038 ctx->shader->vs_out_misc_write = 1;
1039 ctx->shader->vs_out_edgeflag = 1;
1040 ctx->edgeflag_output = i;
1041 break;
1042 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1043 ctx->shader->vs_out_misc_write = 1;
1044 ctx->shader->vs_out_viewport = 1;
1045 break;
1046 case TGSI_SEMANTIC_LAYER:
1047 ctx->shader->vs_out_misc_write = 1;
1048 ctx->shader->vs_out_layer = 1;
1049 break;
1050 case TGSI_SEMANTIC_CLIPVERTEX:
1051 ctx->clip_vertex_write = TRUE;
1052 ctx->cv_output = i;
1053 break;
1054 }
1055 if (ctx->type == PIPE_SHADER_GEOMETRY) {
1056 ctx->gs_out_ring_offset += 16;
1057 }
1058 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
1059 switch (d->Semantic.Name) {
1060 case TGSI_SEMANTIC_COLOR:
1061 ctx->shader->nr_ps_max_color_exports++;
1062 break;
1063 }
1064 }
1065 }
1066 ctx->shader->noutput += count;
1067 break;
1068 case TGSI_FILE_TEMPORARY:
1069 if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
1070 if (d->Array.ArrayID) {
1071 bool spilled;
1072 unsigned idx = map_tgsi_reg_index_to_r600_gpr(ctx,
1073 d->Range.First,
1074 &spilled);
1075
1076 if (!spilled) {
1077 r600_add_gpr_array(ctx->shader, idx,
1078 d->Range.Last - d->Range.First + 1, 0x0F);
1079 }
1080 }
1081 }
1082 break;
1083
1084 case TGSI_FILE_CONSTANT:
1085 case TGSI_FILE_SAMPLER:
1086 case TGSI_FILE_SAMPLER_VIEW:
1087 case TGSI_FILE_ADDRESS:
1088 case TGSI_FILE_BUFFER:
1089 case TGSI_FILE_IMAGE:
1090 case TGSI_FILE_MEMORY:
1091 break;
1092
1093 case TGSI_FILE_HW_ATOMIC:
1094 i = ctx->shader->nhwatomic_ranges;
1095 ctx->shader->atomics[i].start = d->Range.First;
1096 ctx->shader->atomics[i].end = d->Range.Last;
1097 ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + ctx->shader->nhwatomic;
1098 ctx->shader->atomics[i].array_id = d->Array.ArrayID;
1099 ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
1100 ctx->shader->nhwatomic_ranges++;
1101 ctx->shader->nhwatomic += count;
1102 break;
1103
1104 case TGSI_FILE_SYSTEM_VALUE:
1105 if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
1106 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
1107 d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
1108 break; /* Already handled from allocate_system_value_inputs */
1109 } else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
1110 break;
1111 } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
1112 break;
1113 else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
1114 break;
1115 else if (d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ||
1116 d->Semantic.Name == TGSI_SEMANTIC_TESSOUTER) {
1117 int param = r600_get_lds_unique_index(d->Semantic.Name, 0);
1118 int dreg = d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ? 3 : 2;
1119 unsigned temp_reg = r600_get_temp(ctx);
1120
1121 r = get_lds_offset0(ctx, 2, temp_reg, true);
1122 if (r)
1123 return r;
1124
1125 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1126 temp_reg, 0,
1127 temp_reg, 0,
1128 V_SQ_ALU_SRC_LITERAL, param * 16);
1129 if (r)
1130 return r;
1131
1132 do_lds_fetch_values(ctx, temp_reg, dreg, 0xf);
1133 }
1134 else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
1135 /* MOV r1.x, r0.x;
1136 MOV r1.y, r0.y;
1137 */
1138 for (i = 0; i < 2; i++) {
1139 struct r600_bytecode_alu alu;
1140 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1141 alu.op = ALU_OP1_MOV;
1142 alu.src[0].sel = 0;
1143 alu.src[0].chan = 0 + i;
1144 alu.dst.sel = 1;
1145 alu.dst.chan = 0 + i;
1146 alu.dst.write = 1;
1147 alu.last = (i == 1) ? 1 : 0;
1148 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1149 return r;
1150 }
1151 /* ADD r1.z, 1.0f, -r0.x */
1152 struct r600_bytecode_alu alu;
1153 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1154 alu.op = ALU_OP2_ADD;
1155 alu.src[0].sel = V_SQ_ALU_SRC_1;
1156 alu.src[1].sel = 1;
1157 alu.src[1].chan = 0;
1158 alu.src[1].neg = 1;
1159 alu.dst.sel = 1;
1160 alu.dst.chan = 2;
1161 alu.dst.write = 1;
1162 alu.last = 1;
1163 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1164 return r;
1165
1166 /* ADD r1.z, r1.z, -r1.y */
1167 alu.op = ALU_OP2_ADD;
1168 alu.src[0].sel = 1;
1169 alu.src[0].chan = 2;
1170 alu.src[1].sel = 1;
1171 alu.src[1].chan = 1;
1172 alu.src[1].neg = 1;
1173 alu.dst.sel = 1;
1174 alu.dst.chan = 2;
1175 alu.dst.write = 1;
1176 alu.last = 1;
1177 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1178 return r;
1179 break;
1180 }
1181 break;
1182 default:
1183 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
1184 return -EINVAL;
1185 }
1186 return 0;
1187 }
1188
1189 static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
1190 {
1191 struct tgsi_parse_context parse;
1192 struct {
1193 boolean enabled;
1194 int *reg;
1195 unsigned name, alternate_name;
1196 } inputs[2] = {
1197 { false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */
1198
1199 { false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
1200 };
1201 int num_regs = 0;
1202 unsigned k, i;
1203
1204 if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
1205 return 0;
1206 }
1207
1208 /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
1209 while (!tgsi_parse_end_of_tokens(&parse)) {
1210 tgsi_parse_token(&parse);
1211
1212 if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
1213 const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
1214 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
1215 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
1216 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
1217 {
1218 int interpolate, location, k;
1219
1220 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
1221 location = TGSI_INTERPOLATE_LOC_CENTER;
1222 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
1223 location = TGSI_INTERPOLATE_LOC_CENTER;
1224 /* Needs sample positions, currently those are always available */
1225 } else {
1226 location = TGSI_INTERPOLATE_LOC_CENTROID;
1227 }
1228
1229 interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
1230 k = eg_get_interpolator_index(interpolate, location);
1231 if (k >= 0)
1232 ctx->eg_interpolators[k].enabled = true;
1233 }
1234 } else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
1235 struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
1236 if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
1237 for (k = 0; k < ARRAY_SIZE(inputs); k++) {
1238 if (d->Semantic.Name == inputs[k].name ||
1239 d->Semantic.Name == inputs[k].alternate_name) {
1240 inputs[k].enabled = true;
1241 }
1242 }
1243 }
1244 }
1245 }
1246
1247 tgsi_parse_free(&parse);
1248
1249 if (ctx->info.reads_samplemask &&
1250 (ctx->info.uses_linear_sample || ctx->info.uses_persp_sample)) {
1251 inputs[1].enabled = true;
1252 }
1253
1254 if (ctx->bc->chip_class >= EVERGREEN) {
1255 int num_baryc = 0;
1256 /* assign gpr to each interpolator according to priority */
1257 for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
1258 if (ctx->eg_interpolators[i].enabled) {
1259 ctx->eg_interpolators[i].ij_index = num_baryc;
1260 num_baryc++;
1261 }
1262 }
1263 num_baryc = (num_baryc + 1) >> 1; /* two ij pairs fit per GPR */
1264 gpr_offset += num_baryc;
1265 }
1266
1267 for (i = 0; i < ARRAY_SIZE(inputs); i++) {
1268 boolean enabled = inputs[i].enabled;
1269 int *reg = inputs[i].reg;
1270 unsigned name = inputs[i].name;
1271
1272 if (enabled) {
1273 int gpr = gpr_offset + num_regs++;
1274 ctx->shader->nsys_inputs++;
1275
1276 // add to inputs, allocate a gpr
1277 k = ctx->shader->ninput++;
1278 ctx->shader->input[k].name = name;
1279 ctx->shader->input[k].sid = 0;
1280 ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
1281 ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
1282 *reg = ctx->shader->input[k].gpr = gpr;
1283 }
1284 }
1285
1286 return gpr_offset + num_regs;
1287 }
1288
1289 /*
1290 * for evergreen we need to scan the shader to find the number of GPRs we need to
1291 * reserve for interpolation and system values
1292 *
1293 * we need to know if we are going to emit any sample or centroid inputs
1294 * if perspective and linear are required
1295 */
1296 static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
1297 {
1298 unsigned i;
1299
1300 memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));
1301
1302 /*
1303 * Could get this information from the shader info. But right now
1304 * we interpolate all declared inputs, whereas the shader info will
1305 * only contain the bits if the inputs are actually used, so it might
1306 * not be safe...
1307 */
1308 for (i = 0; i < ctx->info.num_inputs; i++) {
1309 int k;
1310 /* skip position/face/mask/sampleid */
1311 if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
1312 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
1313 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
1314 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
1315 continue;
1316
1317 k = eg_get_interpolator_index(
1318 ctx->info.input_interpolate[i],
1319 ctx->info.input_interpolate_loc[i]);
1320 if (k >= 0)
1321 ctx->eg_interpolators[k].enabled = TRUE;
1322 }
1323
1324 /* XXX PULL MODEL and LINE STIPPLE */
1325
1326 return allocate_system_value_inputs(ctx, 0);
1327 }
1328
1329 /* sample_id_sel == NULL means fetch for current sample */
1330 static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
1331 {
1332 struct r600_bytecode_vtx vtx;
1333 int r, t1;
1334
1335 t1 = r600_get_temp(ctx);
1336
1337 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
1338 vtx.op = FETCH_OP_VFETCH;
1339 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
1340 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1341 if (sample_id == NULL) {
1342 assert(ctx->fixed_pt_position_gpr != -1);
1343
1344 vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
1345 vtx.src_sel_x = 3;
1346 }
1347 else {
1348 struct r600_bytecode_alu alu;
1349
1350 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1351 alu.op = ALU_OP1_MOV;
1352 r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
1353 alu.dst.sel = t1;
1354 alu.dst.write = 1;
1355 alu.last = 1;
1356 r = r600_bytecode_add_alu(ctx->bc, &alu);
1357 if (r)
1358 return r;
1359
1360 vtx.src_gpr = t1;
1361 vtx.src_sel_x = 0;
1362 }
1363 vtx.mega_fetch_count = 16;
1364 vtx.dst_gpr = t1;
1365 vtx.dst_sel_x = 0;
1366 vtx.dst_sel_y = 1;
1367 vtx.dst_sel_z = 2;
1368 vtx.dst_sel_w = 3;
1369 vtx.data_format = FMT_32_32_32_32_FLOAT;
1370 vtx.num_format_all = 2;
1371 vtx.format_comp_all = 1;
1372 vtx.use_const_fields = 0;
1373 vtx.offset = 0;
1374 vtx.endian = r600_endian_swap(32);
1375 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
1376
1377 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
1378 if (r)
1379 return r;
1380
1381 return t1;
1382 }
1383
1384 static int eg_load_helper_invocation(struct r600_shader_ctx *ctx)
1385 {
1386 int r;
1387 struct r600_bytecode_alu alu;
1388
1389 /* do a vtx fetch with wqm set on the vtx fetch */
1390 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1391 alu.op = ALU_OP1_MOV;
1392 alu.dst.sel = ctx->helper_invoc_reg;
1393 alu.dst.chan = 0;
1394 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
1395 alu.src[0].value = 0xffffffff;
1396 alu.dst.write = 1;
1397 alu.last = 1;
1398 r = r600_bytecode_add_alu(ctx->bc, &alu);
1399 if (r)
1400 return r;
1401
1402 /* do a vtx fetch in VPM mode */
1403 struct r600_bytecode_vtx vtx;
1404 memset(&vtx, 0, sizeof(vtx));
1405 vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
1406 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
1407 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1408 vtx.src_gpr = 0;
1409 vtx.mega_fetch_count = 16; /* no idea here really... */
1410 vtx.dst_gpr = ctx->helper_invoc_reg;
1411 vtx.dst_sel_x = 4;
1412 vtx.dst_sel_y = 7; /* SEL_Y */
1413 vtx.dst_sel_z = 7; /* SEL_Z */
1414 vtx.dst_sel_w = 7; /* SEL_W */
1415 vtx.data_format = FMT_32;
1416 if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
1417 return r;
1418 ctx->bc->cf_last->vpm = 1;
1419 return 0;
1420 }
1421
1422 static int cm_load_helper_invocation(struct r600_shader_ctx *ctx)
1423 {
1424 int r;
1425 struct r600_bytecode_alu alu;
1426
1427 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1428 alu.op = ALU_OP1_MOV;
1429 alu.dst.sel = ctx->helper_invoc_reg;
1430 alu.dst.chan = 0;
1431 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
1432 alu.src[0].value = 0xffffffff;
1433 alu.dst.write = 1;
1434 alu.last = 1;
1435 r = r600_bytecode_add_alu(ctx->bc, &alu);
1436 if (r)
1437 return r;
1438
1439 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1440 alu.op = ALU_OP1_MOV;
1441 alu.dst.sel = ctx->helper_invoc_reg;
1442 alu.dst.chan = 0;
1443 alu.src[0].sel = V_SQ_ALU_SRC_0;
1444 alu.dst.write = 1;
1445 alu.last = 1;
1446 r = r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_VALID_PIXEL_MODE);
1447 if (r)
1448 return r;
1449
1450 return ctx->helper_invoc_reg;
1451 }
1452
1453 static int load_block_grid_size(struct r600_shader_ctx *ctx, bool load_block)
1454 {
1455 struct r600_bytecode_vtx vtx;
1456 int r, t1;
1457
1458 if (load_block && ctx->cs_block_size_loaded)
1459 return ctx->cs_block_size_reg;
1460 if (!load_block && ctx->cs_grid_size_loaded)
1461 return ctx->cs_grid_size_reg;
1462
1463 t1 = load_block ? ctx->cs_block_size_reg : ctx->cs_grid_size_reg;
1464 struct r600_bytecode_alu alu;
1465 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1466 alu.op = ALU_OP1_MOV;
1467 alu.src[0].sel = V_SQ_ALU_SRC_0;
1468 alu.dst.sel = t1;
1469 alu.dst.write = 1;
1470 alu.last = 1;
1471 r = r600_bytecode_add_alu(ctx->bc, &alu);
1472 if (r)
1473 return r;
1474
1475 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
1476 vtx.op = FETCH_OP_VFETCH;
1477 vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
1478 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1479 vtx.src_gpr = t1;
1480 vtx.src_sel_x = 0;
1481
1482 vtx.mega_fetch_count = 16;
1483 vtx.dst_gpr = t1;
1484 vtx.dst_sel_x = 0;
1485 vtx.dst_sel_y = 1;
1486 vtx.dst_sel_z = 2;
1487 vtx.dst_sel_w = 7;
1488 vtx.data_format = FMT_32_32_32_32;
1489 vtx.num_format_all = 1;
1490 vtx.format_comp_all = 0;
1491 vtx.use_const_fields = 0;
1492 vtx.offset = load_block ? 0 : 16; // first element is size of buffer
1493 vtx.endian = r600_endian_swap(32);
1494 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
1495
1496 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
1497 if (r)
1498 return r;
1499
1500 if (load_block)
1501 ctx->cs_block_size_loaded = true;
1502 else
1503 ctx->cs_grid_size_loaded = true;
1504 return t1;
1505 }
1506
1507 static void tgsi_src(struct r600_shader_ctx *ctx,
1508 const struct tgsi_full_src_register *tgsi_src,
1509 struct r600_shader_src *r600_src)
1510 {
1511 memset(r600_src, 0, sizeof(*r600_src));
1512 r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
1513 r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
1514 r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
1515 r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
1516 r600_src->neg = tgsi_src->Register.Negate;
1517 r600_src->abs = tgsi_src->Register.Absolute;
1518
1519 if (tgsi_src->Register.File == TGSI_FILE_TEMPORARY) {
1520 bool spilled;
1521 unsigned idx;
1522
1523 idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_src->Register.Index, &spilled);
1524
1525 if (spilled) {
1526 int reg = r600_get_temp(ctx);
1527 int r;
1528
1529 r600_src->sel = reg;
1530
1531 if (ctx->bc->chip_class < R700) {
1532 struct r600_bytecode_output cf;
1533
1534 memset(&cf, 0, sizeof(struct r600_bytecode_output));
1535 cf.op = CF_OP_MEM_SCRATCH;
1536 cf.elem_size = 3;
1537 cf.gpr = reg;
1538 cf.comp_mask = 0xF;
1539 cf.swizzle_x = 0;
1540 cf.swizzle_y = 1;
1541 cf.swizzle_z = 2;
1542 cf.swizzle_w = 3;
1543 cf.burst_count = 1;
1544
1545 get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
1546 &cf.array_base, &cf.array_size);
1547
1548 if (tgsi_src->Register.Indirect) {
1549 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
1550 cf.index_gpr = ctx->bc->ar_reg;
1551 }
1552 else {
1553 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ;
1554 cf.array_base += idx;
1555 cf.array_size = 0;
1556 }
1557
1558 r = r600_bytecode_add_output(ctx->bc, &cf);
1559 }
1560 else {
1561 struct r600_bytecode_vtx vtx;
1562
1563 if (r600_bytecode_get_need_wait_ack(ctx->bc)) {
1564 r600_bytecode_need_wait_ack(ctx->bc, false);
1565 r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
1566 }
1567
1568 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
1569 vtx.op = FETCH_OP_READ_SCRATCH;
1570 vtx.dst_gpr = reg;
1571 vtx.uncached = 1; // Must bypass cache since prior spill written in same invocation
1572 vtx.elem_size = 3;
1573 vtx.data_format = FMT_32_32_32_32;
1574 vtx.num_format_all = V_038010_SQ_NUM_FORMAT_INT;
1575 vtx.dst_sel_x = tgsi_src->Register.SwizzleX;
1576 vtx.dst_sel_y = tgsi_src->Register.SwizzleY;
1577 vtx.dst_sel_z = tgsi_src->Register.SwizzleZ;
1578 vtx.dst_sel_w = tgsi_src->Register.SwizzleW;
1579
1580 get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
1581 &vtx.array_base, &vtx.array_size);
1582
1583 if (tgsi_src->Register.Indirect) {
1584 vtx.indexed = 1;
1585 vtx.src_gpr = ctx->bc->ar_reg;
1586 }
1587 else {
1588 vtx.array_base += idx;
1589 vtx.array_size = 0;
1590 }
1591
1592 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
1593 }
1594
1595 if (r)
1596 return;
1597 }
1598 else {
1599 if (tgsi_src->Register.Indirect)
1600 r600_src->rel = V_SQ_REL_RELATIVE;
1601
1602 r600_src->sel = idx;
1603 }
1604
1605 return;
1606 }
1607
1608 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
1609 int index;
1610 if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
1611 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
1612 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
1613
1614 index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
1615 r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg, r600_src->abs);
1616 if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
1617 return;
1618 }
1619 index = tgsi_src->Register.Index;
1620 r600_src->sel = V_SQ_ALU_SRC_LITERAL;
1621 memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
1622 } else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
1623 if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
1624 r600_src->swizzle[0] = 2; // Z value
1625 r600_src->swizzle[1] = 2;
1626 r600_src->swizzle[2] = 2;
1627 r600_src->swizzle[3] = 2;
1628 r600_src->sel = ctx->face_gpr;
1629 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
1630 r600_src->swizzle[0] = 3; // W value
1631 r600_src->swizzle[1] = 3;
1632 r600_src->swizzle[2] = 3;
1633 r600_src->swizzle[3] = 3;
1634 r600_src->sel = ctx->fixed_pt_position_gpr;
1635 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
1636 r600_src->swizzle[0] = 0;
1637 r600_src->swizzle[1] = 1;
1638 r600_src->swizzle[2] = 4;
1639 r600_src->swizzle[3] = 4;
1640 r600_src->sel = load_sample_position(ctx, NULL, -1);
1641 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
1642 r600_src->swizzle[0] = 3;
1643 r600_src->swizzle[1] = 3;
1644 r600_src->swizzle[2] = 3;
1645 r600_src->swizzle[3] = 3;
1646 r600_src->sel = 0;
1647 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
1648 r600_src->swizzle[0] = 0;
1649 r600_src->swizzle[1] = 0;
1650 r600_src->swizzle[2] = 0;
1651 r600_src->swizzle[3] = 0;
1652 r600_src->sel = 0;
1653 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_THREAD_ID) {
1654 r600_src->sel = 0;
1655 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_ID) {
1656 r600_src->sel = 1;
1657 } else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
1658 r600_src->swizzle[0] = 3;
1659 r600_src->swizzle[1] = 3;
1660 r600_src->swizzle[2] = 3;
1661 r600_src->swizzle[3] = 3;
1662 r600_src->sel = 1;
1663 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
1664 r600_src->swizzle[0] = 2;
1665 r600_src->swizzle[1] = 2;
1666 r600_src->swizzle[2] = 2;
1667 r600_src->swizzle[3] = 2;
1668 r600_src->sel = 0;
1669 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSCOORD) {
1670 r600_src->sel = 1;
1671 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSINNER) {
1672 r600_src->sel = 3;
1673 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSOUTER) {
1674 r600_src->sel = 2;
1675 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTICESIN) {
1676 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
1677 r600_src->sel = ctx->tess_input_info;
1678 r600_src->swizzle[0] = 2;
1679 r600_src->swizzle[1] = 2;
1680 r600_src->swizzle[2] = 2;
1681 r600_src->swizzle[3] = 2;
1682 } else {
1683 r600_src->sel = ctx->tess_input_info;
1684 r600_src->swizzle[0] = 3;
1685 r600_src->swizzle[1] = 3;
1686 r600_src->swizzle[2] = 3;
1687 r600_src->swizzle[3] = 3;
1688 }
1689 } else if (ctx->type == PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
1690 r600_src->sel = 0;
1691 r600_src->swizzle[0] = 0;
1692 r600_src->swizzle[1] = 0;
1693 r600_src->swizzle[2] = 0;
1694 r600_src->swizzle[3] = 0;
1695 } else if (ctx->type == PIPE_SHADER_TESS_EVAL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
1696 r600_src->sel = 0;
1697 r600_src->swizzle[0] = 3;
1698 r600_src->swizzle[1] = 3;
1699 r600_src->swizzle[2] = 3;
1700 r600_src->swizzle[3] = 3;
1701 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_GRID_SIZE) {
1702 r600_src->sel = load_block_grid_size(ctx, false);
1703 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_SIZE) {
1704 r600_src->sel = load_block_grid_size(ctx, true);
1705 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_HELPER_INVOCATION) {
1706 r600_src->sel = ctx->helper_invoc_reg;
1707 r600_src->swizzle[0] = 0;
1708 r600_src->swizzle[1] = 0;
1709 r600_src->swizzle[2] = 0;
1710 r600_src->swizzle[3] = 0;
1711 }
1712 } else {
1713 if (tgsi_src->Register.Indirect)
1714 r600_src->rel = V_SQ_REL_RELATIVE;
1715 r600_src->sel = tgsi_src->Register.Index;
1716 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
1717 }
1718 if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
1719 if (tgsi_src->Register.Dimension) {
1720 r600_src->kc_bank = tgsi_src->Dimension.Index;
1721 if (tgsi_src->Dimension.Indirect) {
1722 r600_src->kc_rel = 1;
1723 }
1724 }
1725 }
1726 }
1727
1728 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
1729 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
1730 unsigned int dst_reg)
1731 {
1732 struct r600_bytecode_vtx vtx;
1733 unsigned int ar_reg;
1734 int r;
1735
1736 if (offset) {
1737 struct r600_bytecode_alu alu;
1738
1739 memset(&alu, 0, sizeof(alu));
1740
1741 alu.op = ALU_OP2_ADD_INT;
1742 alu.src[0].sel = ctx->bc->ar_reg;
1743 alu.src[0].chan = ar_chan;
1744
1745 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1746 alu.src[1].value = offset;
1747
1748 alu.dst.sel = dst_reg;
1749 alu.dst.chan = ar_chan;
1750 alu.dst.write = 1;
1751 alu.last = 1;
1752
1753 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1754 return r;
1755
1756 ar_reg = dst_reg;
1757 } else {
1758 ar_reg = ctx->bc->ar_reg;
1759 }
1760
1761 memset(&vtx, 0, sizeof(vtx));
1762 vtx.buffer_id = cb_idx;
1763 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1764 vtx.src_gpr = ar_reg;
1765 vtx.src_sel_x = ar_chan;
1766 vtx.mega_fetch_count = 16;
1767 vtx.dst_gpr = dst_reg;
1768 vtx.dst_sel_x = 0; /* SEL_X */
1769 vtx.dst_sel_y = 1; /* SEL_Y */
1770 vtx.dst_sel_z = 2; /* SEL_Z */
1771 vtx.dst_sel_w = 3; /* SEL_W */
1772 vtx.data_format = FMT_32_32_32_32_FLOAT;
1773 vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
1774 vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
1775 vtx.endian = r600_endian_swap(32);
1776 vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;
1777
1778 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1779 return r;
1780
1781 return 0;
1782 }
1783
1784 static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1785 {
1786 struct r600_bytecode_vtx vtx;
1787 int r;
1788 unsigned index = src->Register.Index;
1789 unsigned vtx_id = src->Dimension.Index;
1790 int offset_reg = ctx->gs_rotated_input[vtx_id / 3];
1791 int offset_chan = vtx_id % 3;
1792 int t2 = 0;
1793
1794 /* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
1795 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */
1796
1797 if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2)
1798 offset_chan = 3;
1799
1800 if (src->Dimension.Indirect || src->Register.Indirect)
1801 t2 = r600_get_temp(ctx);
1802
1803 if (src->Dimension.Indirect) {
1804 int treg[3];
1805 struct r600_bytecode_alu alu;
1806 int r, i;
1807 unsigned addr_reg;
1808 addr_reg = get_address_file_reg(ctx, src->DimIndirect.Index);
1809 if (src->DimIndirect.Index > 0) {
1810 r = single_alu_op2(ctx, ALU_OP1_MOV,
1811 ctx->bc->ar_reg, 0,
1812 addr_reg, 0,
1813 0, 0);
1814 if (r)
1815 return r;
1816 }
1817 /*
1818 We have to copy R0.x/y/w into Rt.x, Rt+1.x and Rt+2.x, and then index
1819 relatively into the register range starting at Rt; at least this is what fglrx seems to do. */
1820 for (i = 0; i < 3; i++) {
1821 treg[i] = r600_get_temp(ctx);
1822 }
1823 r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);
1824
1825 for (i = 0; i < 3; i++) {
1826 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1827 alu.op = ALU_OP1_MOV;
1828 alu.src[0].sel = ctx->gs_rotated_input[0];
1829 alu.src[0].chan = i == 2 ? 3 : i;
1830 alu.dst.sel = treg[i];
1831 alu.dst.chan = 0;
1832 alu.dst.write = 1;
1833 alu.last = 1;
1834 r = r600_bytecode_add_alu(ctx->bc, &alu);
1835 if (r)
1836 return r;
1837 }
1838 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1839 alu.op = ALU_OP1_MOV;
1840 alu.src[0].sel = treg[0];
1841 alu.src[0].rel = 1;
1842 alu.dst.sel = t2;
1843 alu.dst.write = 1;
1844 alu.last = 1;
1845 r = r600_bytecode_add_alu(ctx->bc, &alu);
1846 if (r)
1847 return r;
1848 offset_reg = t2;
1849 offset_chan = 0;
1850 }
1851
1852 if (src->Register.Indirect) {
1853 int addr_reg;
1854 unsigned first = ctx->info.input_array_first[src->Indirect.ArrayID];
1855
1856 addr_reg = get_address_file_reg(ctx, src->Indirect.Index);
1857
1858 /* pull the value from index_reg */
1859 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
1860 t2, 1,
1861 addr_reg, 0,
1862 V_SQ_ALU_SRC_LITERAL, first);
1863 if (r)
1864 return r;
1865 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1866 t2, 0,
1867 t2, 1,
1868 V_SQ_ALU_SRC_LITERAL, 4,
1869 offset_reg, offset_chan);
1870 if (r)
1871 return r;
1872 offset_reg = t2;
1873 offset_chan = 0;
1874 index = src->Register.Index - first;
1875 }
1876
1877 memset(&vtx, 0, sizeof(vtx));
1878 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1879 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1880 vtx.src_gpr = offset_reg;
1881 vtx.src_sel_x = offset_chan;
1882 vtx.offset = index * 16; /*bytes*/
1883 vtx.mega_fetch_count = 16;
1884 vtx.dst_gpr = dst_reg;
1885 vtx.dst_sel_x = 0; /* SEL_X */
1886 vtx.dst_sel_y = 1; /* SEL_Y */
1887 vtx.dst_sel_z = 2; /* SEL_Z */
1888 vtx.dst_sel_w = 3; /* SEL_W */
1889 if (ctx->bc->chip_class >= EVERGREEN) {
1890 vtx.use_const_fields = 1;
1891 } else {
1892 vtx.data_format = FMT_32_32_32_32_FLOAT;
1893 }
1894
1895 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1896 return r;
1897
1898 return 0;
1899 }
1900
1901 static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
1902 {
1903 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1904 unsigned i;
1905
1906 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1907 struct tgsi_full_src_register *src = &inst->Src[i];
1908
1909 if (src->Register.File == TGSI_FILE_INPUT) {
1910 if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
1911 /* primitive id is in R0.z */
1912 ctx->src[i].sel = 0;
1913 ctx->src[i].swizzle[0] = 2;
1914 }
1915 }
1916 if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
1917 int treg = r600_get_temp(ctx);
1918
1919 fetch_gs_input(ctx, src, treg);
1920 ctx->src[i].sel = treg;
1921 ctx->src[i].rel = 0;
1922 }
1923 }
1924 return 0;
1925 }
1926
1927
1928 /* Tessellation shaders pass outputs to the next shader using LDS.
1929 *
1930 * LS outputs = TCS(HS) inputs
1931 * TCS(HS) outputs = TES(DS) inputs
1932 *
1933 * The LDS layout is:
1934 * - TCS inputs for patch 0
1935 * - TCS inputs for patch 1
1936 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
1937 * - ...
1938 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
1939 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
1940 * - TCS outputs for patch 1
1941 * - Per-patch TCS outputs for patch 1
1942 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
1943 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
1944 * - ...
1945 *
1946 * All three shaders VS(LS), TCS, TES share the same LDS space.
1947 */
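/* Illustrative sketch of the offsets above (not the actual helpers): with
 * I = TCS input size per patch, O = per-vertex TCS output size per patch and
 * P = per-patch TCS output size, they work out to roughly:
 *   tcs_in(patch)         = patch * I
 *   tcs_out(patch)        = num_patches * I + patch * (O + P)
 *   tcs_patch_data(patch) = tcs_out(patch) + O
 */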
1948 /* this will return with the byte address in temp_reg.x */
1949 static int r600_get_byte_address(struct r600_shader_ctx *ctx, int temp_reg,
1950 const struct tgsi_full_dst_register *dst,
1951 const struct tgsi_full_src_register *src,
1952 int stride_bytes_reg, int stride_bytes_chan)
1953 {
1954 struct tgsi_full_dst_register reg;
1955 ubyte *name, *index, *array_first;
1956 int r;
1957 int param;
1958 struct tgsi_shader_info *info = &ctx->info;
1959 /* Set the register description. The address computation is the same
1960 * for sources and destinations. */
1961 if (src) {
1962 reg.Register.File = src->Register.File;
1963 reg.Register.Index = src->Register.Index;
1964 reg.Register.Indirect = src->Register.Indirect;
1965 reg.Register.Dimension = src->Register.Dimension;
1966 reg.Indirect = src->Indirect;
1967 reg.Dimension = src->Dimension;
1968 reg.DimIndirect = src->DimIndirect;
1969 } else
1970 reg = *dst;
1971
1972 /* If the register is 2-dimensional (e.g. an array of vertices
1973 * in a primitive), calculate the base address of the vertex. */
1974 if (reg.Register.Dimension) {
1975 int sel, chan;
1976 if (reg.Dimension.Indirect) {
1977 unsigned addr_reg;
1978 assert (reg.DimIndirect.File == TGSI_FILE_ADDRESS);
1979
1980 addr_reg = get_address_file_reg(ctx, reg.DimIndirect.Index);
1981 /* pull the value from index_reg */
1982 sel = addr_reg;
1983 chan = 0;
1984 } else {
1985 sel = V_SQ_ALU_SRC_LITERAL;
1986 chan = reg.Dimension.Index;
1987 }
1988
1989 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
1990 temp_reg, 0,
1991 stride_bytes_reg, stride_bytes_chan,
1992 sel, chan,
1993 temp_reg, 0);
1994 if (r)
1995 return r;
1996 }
1997
1998 if (reg.Register.File == TGSI_FILE_INPUT) {
1999 name = info->input_semantic_name;
2000 index = info->input_semantic_index;
2001 array_first = info->input_array_first;
2002 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
2003 name = info->output_semantic_name;
2004 index = info->output_semantic_index;
2005 array_first = info->output_array_first;
2006 } else {
2007 assert(0);
2008 return -1;
2009 }
2010 if (reg.Register.Indirect) {
2011 int addr_reg;
2012 int first;
2013 /* Add the relative address of the element. */
2014 if (reg.Indirect.ArrayID)
2015 first = array_first[reg.Indirect.ArrayID];
2016 else
2017 first = reg.Register.Index;
2018
2019 addr_reg = get_address_file_reg(ctx, reg.Indirect.Index);
2020
2021 /* pull the value from index_reg */
2022 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2023 temp_reg, 0,
2024 V_SQ_ALU_SRC_LITERAL, 16,
2025 addr_reg, 0,
2026 temp_reg, 0);
2027 if (r)
2028 return r;
2029
2030 param = r600_get_lds_unique_index(name[first],
2031 index[first]);
2032
2033 } else {
2034 param = r600_get_lds_unique_index(name[reg.Register.Index],
2035 index[reg.Register.Index]);
2036 }
2037
2038 /* add to base_addr - passed in temp_reg.x */
2039 if (param) {
2040 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2041 temp_reg, 0,
2042 temp_reg, 0,
2043 V_SQ_ALU_SRC_LITERAL, param * 16);
2044 if (r)
2045 return r;
2046
2047 }
2048 return 0;
2049 }
2050
2051 static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
2052 unsigned dst_reg, unsigned mask)
2053 {
2054 struct r600_bytecode_alu alu;
2055 int r, i, lasti;
2056
2057 if ((ctx->bc->cf_last->ndw >> 1) >= 0x60) /* clause is getting full - start a new CF */
2058 ctx->bc->force_add_cf = 1;
2059
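/* temp_reg.x already holds the base byte address; compute base + 4 * i for
 * the remaining enabled channels (channel 0 reuses temp_reg.x as-is) */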
2060 lasti = tgsi_last_instruction(mask);
2061 for (i = 1; i <= lasti; i++) {
2062 if (!(mask & (1 << i)))
2063 continue;
2064
2065 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2066 temp_reg, i,
2067 temp_reg, 0,
2068 V_SQ_ALU_SRC_LITERAL, 4 * i);
2069 if (r)
2070 return r;
2071 }
2072 for (i = 0; i <= lasti; i++) {
2073 if (!(mask & (1 << i)))
2074 continue;
2075
2076 /* emit an LDS_READ_RET */
2077 memset(&alu, 0, sizeof(alu));
2078 alu.op = LDS_OP1_LDS_READ_RET;
2079 alu.src[0].sel = temp_reg;
2080 alu.src[0].chan = i;
2081 alu.src[1].sel = V_SQ_ALU_SRC_0;
2082 alu.src[2].sel = V_SQ_ALU_SRC_0;
2083 alu.dst.chan = 0;
2084 alu.is_lds_idx_op = true;
2085 alu.last = 1;
2086 r = r600_bytecode_add_alu(ctx->bc, &alu);
2087 if (r)
2088 return r;
2089 }
2090 for (i = 0; i <= lasti; i++) {
2091 if (!(mask & (1 << i)))
2092 continue;
2093
2094 /* then read from LDS_OQ_A_POP */
2095 memset(&alu, 0, sizeof(alu));
2096
2097 alu.op = ALU_OP1_MOV;
2098 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
2099 alu.src[0].chan = 0;
2100 alu.dst.sel = dst_reg;
2101 alu.dst.chan = i;
2102 alu.dst.write = 1;
2103 alu.last = 1;
2104 r = r600_bytecode_add_alu(ctx->bc, &alu);
2105 if (r)
2106 return r;
2107 }
2108 return 0;
2109 }
2110
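/* build a mask of the components a swizzled source actually reads,
 * e.g. a .xxxy swizzle yields 0x3 - only X and Y need to be fetched */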
2111 static int fetch_mask(struct tgsi_src_register *reg)
2112 {
2113 int mask = 0;
2114 mask |= 1 << reg->SwizzleX;
2115 mask |= 1 << reg->SwizzleY;
2116 mask |= 1 << reg->SwizzleZ;
2117 mask |= 1 << reg->SwizzleW;
2118 return mask;
2119 }
2120
2121 static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2122 {
2123 int r;
2124 unsigned temp_reg = r600_get_temp(ctx);
2125
2126 r = get_lds_offset0(ctx, 2, temp_reg,
2127 !src->Register.Dimension);
2128 if (r)
2129 return r;
2130
2131 /* the base address is now in temp.x */
2132 r = r600_get_byte_address(ctx, temp_reg,
2133 NULL, src, ctx->tess_output_info, 1);
2134 if (r)
2135 return r;
2136
2137 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2138 if (r)
2139 return r;
2140 return 0;
2141 }
2142
2143 static int fetch_tcs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2144 {
2145 int r;
2146 unsigned temp_reg = r600_get_temp(ctx);
2147
2148 /* t.x = ips * r0.y */
2149 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
2150 temp_reg, 0,
2151 ctx->tess_input_info, 0,
2152 0, 1);
2153
2154 if (r)
2155 return r;
2156
2157 /* the base address is now in temp.x */
2158 r = r600_get_byte_address(ctx, temp_reg,
2159 NULL, src, ctx->tess_input_info, 1);
2160 if (r)
2161 return r;
2162
2163 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2164 if (r)
2165 return r;
2166 return 0;
2167 }
2168
2169 static int fetch_tcs_output(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
2170 {
2171 int r;
2172 unsigned temp_reg = r600_get_temp(ctx);
2173
2174 r = get_lds_offset0(ctx, 1, temp_reg,
2175 !src->Register.Dimension);
2176 if (r)
2177 return r;
2178 /* the base address is now in temp.x */
2179 r = r600_get_byte_address(ctx, temp_reg,
2180 NULL, src,
2181 ctx->tess_output_info, 1);
2182 if (r)
2183 return r;
2184
2185 r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
2186 if (r)
2187 return r;
2188 return 0;
2189 }
2190
2191 static int tgsi_split_lds_inputs(struct r600_shader_ctx *ctx)
2192 {
2193 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2194 unsigned i;
2195
2196 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
2197 struct tgsi_full_src_register *src = &inst->Src[i];
2198
2199 if (ctx->type == PIPE_SHADER_TESS_EVAL && src->Register.File == TGSI_FILE_INPUT) {
2200 int treg = r600_get_temp(ctx);
2201 fetch_tes_input(ctx, src, treg);
2202 ctx->src[i].sel = treg;
2203 ctx->src[i].rel = 0;
2204 }
2205 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_INPUT) {
2206 int treg = r600_get_temp(ctx);
2207 fetch_tcs_input(ctx, src, treg);
2208 ctx->src[i].sel = treg;
2209 ctx->src[i].rel = 0;
2210 }
2211 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_OUTPUT) {
2212 int treg = r600_get_temp(ctx);
2213 fetch_tcs_output(ctx, src, treg);
2214 ctx->src[i].sel = treg;
2215 ctx->src[i].rel = 0;
2216 }
2217 }
2218 return 0;
2219 }
2220
2221 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
2222 {
2223 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2224 struct r600_bytecode_alu alu;
2225 int i, j, k, nconst, r;
2226
2227 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
2228 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
2229 nconst++;
2230 }
2231 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
2232 }
2233 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
2234 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
2235 continue;
2236 }
2237
2238 if (ctx->src[i].rel) {
2239 int chan = inst->Src[i].Indirect.Swizzle;
2240 int treg = r600_get_temp(ctx);
2241 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
2242 return r;
2243
2244 ctx->src[i].kc_bank = 0;
2245 ctx->src[i].kc_rel = 0;
2246 ctx->src[i].sel = treg;
2247 ctx->src[i].rel = 0;
2248 j--;
2249 } else if (j > 0) {
2250 int treg = r600_get_temp(ctx);
2251 for (k = 0; k < 4; k++) {
2252 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2253 alu.op = ALU_OP1_MOV;
2254 alu.src[0].sel = ctx->src[i].sel;
2255 alu.src[0].chan = k;
2256 alu.src[0].rel = ctx->src[i].rel;
2257 alu.src[0].kc_bank = ctx->src[i].kc_bank;
2258 alu.src[0].kc_rel = ctx->src[i].kc_rel;
2259 alu.dst.sel = treg;
2260 alu.dst.chan = k;
2261 alu.dst.write = 1;
2262 if (k == 3)
2263 alu.last = 1;
2264 r = r600_bytecode_add_alu(ctx->bc, &alu);
2265 if (r)
2266 return r;
2267 }
2268 ctx->src[i].sel = treg;
2269 ctx->src[i].rel = 0;
2270 j--;
2271 }
2272 }
2273 return 0;
2274 }
2275
2276 /* need to move any immediates into a temp - the trig lowering emits literals for its PI constants, so they must not clash with source literals */
2277 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
2278 {
2279 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2280 struct r600_bytecode_alu alu;
2281 int i, j, k, nliteral, r;
2282
2283 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
2284 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2285 nliteral++;
2286 }
2287 }
2288 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
2289 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
2290 int treg = r600_get_temp(ctx);
2291 for (k = 0; k < 4; k++) {
2292 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2293 alu.op = ALU_OP1_MOV;
2294 alu.src[0].sel = ctx->src[i].sel;
2295 alu.src[0].chan = k;
2296 alu.src[0].value = ctx->src[i].value[k];
2297 alu.dst.sel = treg;
2298 alu.dst.chan = k;
2299 alu.dst.write = 1;
2300 if (k == 3)
2301 alu.last = 1;
2302 r = r600_bytecode_add_alu(ctx->bc, &alu);
2303 if (r)
2304 return r;
2305 }
2306 ctx->src[i].sel = treg;
2307 j--;
2308 }
2309 }
2310 return 0;
2311 }
2312
2313 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
2314 {
2315 int i, r, count = ctx->shader->ninput;
2316
2317 for (i = 0; i < count; i++) {
2318 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2319 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
2320 if (r)
2321 return r;
2322 }
2323 }
2324 return 0;
2325 }
2326
2327 static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
2328 int stream, unsigned *stream_item_size UNUSED)
2329 {
2330 unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
2331 unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
2332 int j, r;
2333 unsigned i;
2334
2335 /* Sanity checking. */
2336 if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
2337 R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
2338 r = -EINVAL;
2339 goto out_err;
2340 }
2341 for (i = 0; i < so->num_outputs; i++) {
2342 if (so->output[i].output_buffer >= 4) {
2343 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
2344 so->output[i].output_buffer);
2345 r = -EINVAL;
2346 goto out_err;
2347 }
2348 }
2349
2350 /* Initialize locations where the outputs are stored. */
2351 for (i = 0; i < so->num_outputs; i++) {
2352
2353 so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
2354 start_comp[i] = so->output[i].start_component;
2355 /* Lower outputs with dst_offset < start_component.
2356 *
2357 * We can only output 4D vectors with a write mask, e.g. we can
2358 * only output the W component at offset 3, etc. If we want
2359 * to store Y, Z, or W at buffer offset 0, we need to use MOV
2360 * to move it to X and output X. */
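/* e.g. num_components = 1, start_component = 1 (Y), dst_offset = 0:
 * MOV the Y component into tmp.x and export tmp starting at X */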
2361 if (so->output[i].dst_offset < so->output[i].start_component) {
2362 unsigned tmp = r600_get_temp(ctx);
2363
2364 for (j = 0; j < so->output[i].num_components; j++) {
2365 struct r600_bytecode_alu alu;
2366 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2367 alu.op = ALU_OP1_MOV;
2368 alu.src[0].sel = so_gpr[i];
2369 alu.src[0].chan = so->output[i].start_component + j;
2370
2371 alu.dst.sel = tmp;
2372 alu.dst.chan = j;
2373 alu.dst.write = 1;
2374 if (j == so->output[i].num_components - 1)
2375 alu.last = 1;
2376 r = r600_bytecode_add_alu(ctx->bc, &alu);
2377 if (r)
2378 return r;
2379 }
2380 start_comp[i] = 0;
2381 so_gpr[i] = tmp;
2382 }
2383 }
2384
2385 /* Write outputs to buffers. */
2386 for (i = 0; i < so->num_outputs; i++) {
2387 struct r600_bytecode_output output;
2388
2389 if (stream != -1 && stream != so->output[i].stream)
2390 continue;
2391
2392 memset(&output, 0, sizeof(struct r600_bytecode_output));
2393 output.gpr = so_gpr[i];
2394 output.elem_size = so->output[i].num_components - 1;
2395 if (output.elem_size == 2)
2396 output.elem_size = 3; // 3 components not supported; write 4 with junk at the end
2397 output.array_base = so->output[i].dst_offset - start_comp[i];
2398 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2399 output.burst_count = 1;
2400 /* array_size is an upper limit for the burst_count
2401 * with MEM_STREAM instructions */
2402 output.array_size = 0xFFF;
2403 output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];
2404
2405 if (ctx->bc->chip_class >= EVERGREEN) {
2406 switch (so->output[i].output_buffer) {
2407 case 0:
2408 output.op = CF_OP_MEM_STREAM0_BUF0;
2409 break;
2410 case 1:
2411 output.op = CF_OP_MEM_STREAM0_BUF1;
2412 break;
2413 case 2:
2414 output.op = CF_OP_MEM_STREAM0_BUF2;
2415 break;
2416 case 3:
2417 output.op = CF_OP_MEM_STREAM0_BUF3;
2418 break;
2419 }
2420 output.op += so->output[i].stream * 4;
2421 assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
2422 ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
2423 } else {
2424 switch (so->output[i].output_buffer) {
2425 case 0:
2426 output.op = CF_OP_MEM_STREAM0;
2427 break;
2428 case 1:
2429 output.op = CF_OP_MEM_STREAM1;
2430 break;
2431 case 2:
2432 output.op = CF_OP_MEM_STREAM2;
2433 break;
2434 case 3:
2435 output.op = CF_OP_MEM_STREAM3;
2436 break;
2437 }
2438 ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
2439 }
2440 r = r600_bytecode_add_output(ctx->bc, &output);
2441 if (r)
2442 goto out_err;
2443 }
2444 return 0;
2445 out_err:
2446 return r;
2447 }
2448
2449 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
2450 {
2451 struct r600_bytecode_alu alu;
2452 unsigned reg;
2453
2454 if (!ctx->shader->vs_out_edgeflag)
2455 return;
2456
2457 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
2458
2459 /* clamp(x, 0, 1) */
2460 memset(&alu, 0, sizeof(alu));
2461 alu.op = ALU_OP1_MOV;
2462 alu.src[0].sel = reg;
2463 alu.dst.sel = reg;
2464 alu.dst.write = 1;
2465 alu.dst.clamp = 1;
2466 alu.last = 1;
2467 r600_bytecode_add_alu(ctx->bc, &alu);
2468
2469 memset(&alu, 0, sizeof(alu));
2470 alu.op = ALU_OP1_FLT_TO_INT;
2471 alu.src[0].sel = reg;
2472 alu.dst.sel = reg;
2473 alu.dst.write = 1;
2474 alu.last = 1;
2475 r600_bytecode_add_alu(ctx->bc, &alu);
2476 }
2477
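/* The GS copy shader is a small helper VS run after the GS: it fetches the
 * vertices the GS wrote to the GSVS ring back from memory and performs the
 * position/parameter exports (and streamout) on the GS's behalf. */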
2478 static int generate_gs_copy_shader(struct r600_context *rctx,
2479 struct r600_pipe_shader *gs,
2480 struct pipe_stream_output_info *so)
2481 {
2482 struct r600_shader_ctx ctx = {};
2483 struct r600_shader *gs_shader = &gs->shader;
2484 struct r600_pipe_shader *cshader;
2485 unsigned ocnt = gs_shader->noutput;
2486 struct r600_bytecode_alu alu;
2487 struct r600_bytecode_vtx vtx;
2488 struct r600_bytecode_output output;
2489 struct r600_bytecode_cf *cf_jump, *cf_pop,
2490 *last_exp_pos = NULL, *last_exp_param = NULL;
2491 int next_clip_pos = 61, next_param = 0;
2492 unsigned i, j;
2493 int ring;
2494 bool only_ring_0 = true;
2495 cshader = calloc(1, sizeof(struct r600_pipe_shader));
2496 if (!cshader)
2497 return 0;
2498
2499 memcpy(cshader->shader.output, gs_shader->output, ocnt *
2500 sizeof(struct r600_shader_io));
2501
2502 cshader->shader.noutput = ocnt;
2503
2504 ctx.shader = &cshader->shader;
2505 ctx.bc = &ctx.shader->bc;
2506 ctx.type = ctx.bc->type = PIPE_SHADER_VERTEX;
2507
2508 r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
2509 rctx->screen->has_compressed_msaa_texturing);
2510
2511 ctx.bc->isa = rctx->isa;
2512
2513 cf_jump = NULL;
2514 memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));
2515
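/* R0.x packs the GSVS ring offset (low 30 bits) and the stream ID (top 2
 * bits); split them below. The AND leaves alu.last unset, so it co-issues
 * with the LSHR in one ALU group and the shift still reads the pre-mask
 * value of R0.x. */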
2516 /* R0.x = R0.x & 0x3fffffff */
2517 memset(&alu, 0, sizeof(alu));
2518 alu.op = ALU_OP2_AND_INT;
2519 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2520 alu.src[1].value = 0x3fffffff;
2521 alu.dst.write = 1;
2522 r600_bytecode_add_alu(ctx.bc, &alu);
2523
2524 /* R0.y = R0.x >> 30 */
2525 memset(&alu, 0, sizeof(alu));
2526 alu.op = ALU_OP2_LSHR_INT;
2527 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2528 alu.src[1].value = 0x1e;
2529 alu.dst.chan = 1;
2530 alu.dst.write = 1;
2531 alu.last = 1;
2532 r600_bytecode_add_alu(ctx.bc, &alu);
2533
2534 /* fetch vertex data from GSVS ring */
2535 for (i = 0; i < ocnt; ++i) {
2536 struct r600_shader_io *out = &ctx.shader->output[i];
2537
2538 out->gpr = i + 1;
2539 out->ring_offset = i * 16;
2540
2541 memset(&vtx, 0, sizeof(vtx));
2542 vtx.op = FETCH_OP_VFETCH;
2543 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
2544 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2545 vtx.mega_fetch_count = 16;
2546 vtx.offset = out->ring_offset;
2547 vtx.dst_gpr = out->gpr;
2548 vtx.src_gpr = 0;
2549 vtx.dst_sel_x = 0;
2550 vtx.dst_sel_y = 1;
2551 vtx.dst_sel_z = 2;
2552 vtx.dst_sel_w = 3;
2553 if (rctx->b.chip_class >= EVERGREEN) {
2554 vtx.use_const_fields = 1;
2555 } else {
2556 vtx.data_format = FMT_32_32_32_32_FLOAT;
2557 }
2558
2559 r600_bytecode_add_vtx(ctx.bc, &vtx);
2560 }
2561 ctx.temp_reg = i + 1;
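/* emit one predicated block per GSVS ring: R0.y (the stream ID extracted
 * above) selects which ring's vertices this invocation copies out */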
2562 for (ring = 3; ring >= 0; --ring) {
2563 bool enabled = false;
2564 for (i = 0; i < so->num_outputs; i++) {
2565 if (so->output[i].stream == ring) {
2566 enabled = true;
2567 if (ring > 0)
2568 only_ring_0 = false;
2569 break;
2570 }
2571 }
2572 if (ring != 0 && !enabled) {
2573 cshader->shader.ring_item_sizes[ring] = 0;
2574 continue;
2575 }
2576
2577 if (cf_jump) {
2578 // Patch up jump label
2579 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2580 cf_pop = ctx.bc->cf_last;
2581
2582 cf_jump->cf_addr = cf_pop->id + 2;
2583 cf_jump->pop_count = 1;
2584 cf_pop->cf_addr = cf_pop->id + 2;
2585 cf_pop->pop_count = 1;
2586 }
2587
2588 /* PRED_SETE_INT __, R0.y, ring */
2589 memset(&alu, 0, sizeof(alu));
2590 alu.op = ALU_OP2_PRED_SETE_INT;
2591 alu.src[0].chan = 1;
2592 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2593 alu.src[1].value = ring;
2594 alu.execute_mask = 1;
2595 alu.update_pred = 1;
2596 alu.last = 1;
2597 r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2598
2599 r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
2600 cf_jump = ctx.bc->cf_last;
2601
2602 if (enabled)
2603 emit_streamout(&ctx, so, only_ring_0 ? -1 : ring, &cshader->shader.ring_item_sizes[ring]);
2604 cshader->shader.ring_item_sizes[ring] = ocnt * 16;
2605 }
2606
2607 /* the regular bytecode build adds NOPs on R600 - mirror that here */
2608 if (ctx.bc->chip_class == R600) {
2609 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2610 alu.op = ALU_OP0_NOP;
2611 alu.last = 1;
2612 r600_bytecode_add_alu(ctx.bc, &alu);
2613
2614 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2615 }
2616
2617 /* export vertex data */
2618 /* XXX factor out common code with r600_shader_from_tgsi ? */
2619 for (i = 0; i < ocnt; ++i) {
2620 struct r600_shader_io *out = &ctx.shader->output[i];
2621 bool instream0 = true;
2622 if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
2623 continue;
2624
2625 for (j = 0; j < so->num_outputs; j++) {
2626 if (so->output[j].register_index == i) {
2627 if (so->output[j].stream == 0)
2628 break;
2629 if (so->output[j].stream > 0)
2630 instream0 = false;
2631 }
2632 }
2633 if (!instream0)
2634 continue;
2635 memset(&output, 0, sizeof(output));
2636 output.gpr = out->gpr;
2637 output.elem_size = 3;
2638 output.swizzle_x = 0;
2639 output.swizzle_y = 1;
2640 output.swizzle_z = 2;
2641 output.swizzle_w = 3;
2642 output.burst_count = 1;
2643 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2644 output.op = CF_OP_EXPORT;
2645 switch (out->name) {
2646 case TGSI_SEMANTIC_POSITION:
2647 output.array_base = 60;
2648 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2649 break;
2650
2651 case TGSI_SEMANTIC_PSIZE:
2652 output.array_base = 61;
2653 if (next_clip_pos == 61)
2654 next_clip_pos = 62;
2655 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2656 output.swizzle_y = 7;
2657 output.swizzle_z = 7;
2658 output.swizzle_w = 7;
2659 ctx.shader->vs_out_misc_write = 1;
2660 ctx.shader->vs_out_point_size = 1;
2661 break;
2662 case TGSI_SEMANTIC_LAYER:
2663 if (out->spi_sid) {
2664 /* duplicate it as PARAM to pass to the pixel shader */
2665 output.array_base = next_param++;
2666 r600_bytecode_add_output(ctx.bc, &output);
2667 last_exp_param = ctx.bc->cf_last;
2668 }
2669 output.array_base = 61;
2670 if (next_clip_pos == 61)
2671 next_clip_pos = 62;
2672 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2673 output.swizzle_x = 7;
2674 output.swizzle_y = 7;
2675 output.swizzle_z = 0;
2676 output.swizzle_w = 7;
2677 ctx.shader->vs_out_misc_write = 1;
2678 ctx.shader->vs_out_layer = 1;
2679 break;
2680 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2681 if (out->spi_sid) {
2682 /* duplicate it as PARAM to pass to the pixel shader */
2683 output.array_base = next_param++;
2684 r600_bytecode_add_output(ctx.bc, &output);
2685 last_exp_param = ctx.bc->cf_last;
2686 }
2687 output.array_base = 61;
2688 if (next_clip_pos == 61)
2689 next_clip_pos = 62;
2690 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2691 ctx.shader->vs_out_misc_write = 1;
2692 ctx.shader->vs_out_viewport = 1;
2693 output.swizzle_x = 7;
2694 output.swizzle_y = 7;
2695 output.swizzle_z = 7;
2696 output.swizzle_w = 0;
2697 break;
2698 case TGSI_SEMANTIC_CLIPDIST:
2699 /* spi_sid is 0 for clipdistance outputs that were generated
2700 * for clipvertex - we don't need to pass them to PS */
2701 ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
2702 ctx.shader->cull_dist_write = gs->shader.cull_dist_write;
2703 ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask;
2704 if (out->spi_sid) {
2705 /* duplicate it as PARAM to pass to the pixel shader */
2706 output.array_base = next_param++;
2707 r600_bytecode_add_output(ctx.bc, &output);
2708 last_exp_param = ctx.bc->cf_last;
2709 }
2710 output.array_base = next_clip_pos++;
2711 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2712 break;
2713 case TGSI_SEMANTIC_FOG:
2714 output.swizzle_y = 4; /* 0 */
2715 output.swizzle_z = 4; /* 0 */
2716 output.swizzle_w = 5; /* 1 */
2717 break;
2718 default:
2719 output.array_base = next_param++;
2720 break;
2721 }
2722 r600_bytecode_add_output(ctx.bc, &output);
2723 if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
2724 last_exp_param = ctx.bc->cf_last;
2725 else
2726 last_exp_pos = ctx.bc->cf_last;
2727 }
2728
2729 if (!last_exp_pos) {
2730 memset(&output, 0, sizeof(output));
2731 output.gpr = 0;
2732 output.elem_size = 3;
2733 output.swizzle_x = 7;
2734 output.swizzle_y = 7;
2735 output.swizzle_z = 7;
2736 output.swizzle_w = 7;
2737 output.burst_count = 1;
2739 output.op = CF_OP_EXPORT;
2740 output.array_base = 60;
2741 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2742 r600_bytecode_add_output(ctx.bc, &output);
2743 last_exp_pos = ctx.bc->cf_last;
2744 }
2745
2746 if (!last_exp_param) {
2747 memset(&output, 0, sizeof(output));
2748 output.gpr = 0;
2749 output.elem_size = 3;
2750 output.swizzle_x = 7;
2751 output.swizzle_y = 7;
2752 output.swizzle_z = 7;
2753 output.swizzle_w = 7;
2754 output.burst_count = 1;
2756 output.op = CF_OP_EXPORT;
2757 output.array_base = next_param++;
2758 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2759 r600_bytecode_add_output(ctx.bc, &output);
2760 last_exp_param = ctx.bc->cf_last;
2761 }
2762
2763 last_exp_pos->op = CF_OP_EXPORT_DONE;
2764 last_exp_param->op = CF_OP_EXPORT_DONE;
2765
2766 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2767 cf_pop = ctx.bc->cf_last;
2768
2769 cf_jump->cf_addr = cf_pop->id + 2;
2770 cf_jump->pop_count = 1;
2771 cf_pop->cf_addr = cf_pop->id + 2;
2772 cf_pop->pop_count = 1;
2773
2774 if (ctx.bc->chip_class == CAYMAN)
2775 cm_bytecode_add_cf_end(ctx.bc);
2776 else {
2777 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2778 ctx.bc->cf_last->end_of_program = 1;
2779 }
2780
2781 gs->gs_copy_shader = cshader;
2782 cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2783
2784 ctx.bc->nstack = 1;
2785
2786 return r600_bytecode_build(ctx.bc);
2787 }
2788
2789 static int emit_inc_ring_offset(struct r600_shader_ctx *ctx, int idx, bool ind)
2790 {
2791 if (ind) {
2792 struct r600_bytecode_alu alu;
2793 int r;
2794
2795 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2796 alu.op = ALU_OP2_ADD_INT;
2797 alu.src[0].sel = ctx->gs_export_gpr_tregs[idx];
2798 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2799 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
2800 alu.dst.sel = ctx->gs_export_gpr_tregs[idx];
2801 alu.dst.write = 1;
2802 alu.last = 1;
2803 r = r600_bytecode_add_alu(ctx->bc, &alu);
2804 if (r)
2805 return r;
2806 }
2807 return 0;
2808 }
2809
2810 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so UNUSED, int stream, bool ind)
2811 {
2812 struct r600_bytecode_output output;
2813 int ring_offset;
2814 unsigned i, k;
2815 int effective_stream = stream == -1 ? 0 : stream;
2816 int idx = 0;
2817
2818 for (i = 0; i < ctx->shader->noutput; i++) {
2819 if (ctx->gs_for_vs) {
2820 /* for ES we need to look up the ring offset expected by the GS
2821 * (map this output to a GS input by name and sid) */
2822 /* FIXME precompute offsets */
2823 ring_offset = -1;
2824 for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
2825 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
2826 struct r600_shader_io *out = &ctx->shader->output[i];
2827 if (in->name == out->name && in->sid == out->sid)
2828 ring_offset = in->ring_offset;
2829 }
2830
2831 if (ring_offset == -1)
2832 continue;
2833 } else {
2834 ring_offset = idx * 16;
2835 idx++;
2836 }
2837
2838 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
2839 continue;
2840 /* after parsing the input decls, next_ring_offset holds the total size of
2841 * a single vertex's data; gs_next_vertex is the current vertex index */
2842 if (!ind)
2843 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
2844
2845 memset(&output, 0, sizeof(struct r600_bytecode_output));
2846 output.gpr = ctx->shader->output[i].gpr;
2847 output.elem_size = 3;
2848 output.comp_mask = 0xF;
2849 output.burst_count = 1;
2850
2851 if (ind)
2852 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
2853 else
2854 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2855
2856 switch (stream) {
2857 default:
2858 case 0:
2859 output.op = CF_OP_MEM_RING; break;
2860 case 1:
2861 output.op = CF_OP_MEM_RING1; break;
2862 case 2:
2863 output.op = CF_OP_MEM_RING2; break;
2864 case 3:
2865 output.op = CF_OP_MEM_RING3; break;
2866 }
2867
2868 if (ind) {
2869 output.array_base = ring_offset >> 2; /* in dwords */
2870 output.array_size = 0xfff;
2871 output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
2872 } else
2873 output.array_base = ring_offset >> 2; /* in dwords */
2874 r600_bytecode_add_output(ctx->bc, &output);
2875 }
2876
2877 ++ctx->gs_next_vertex;
2878 return 0;
2879 }
2880
2881
2882 static int r600_fetch_tess_io_info(struct r600_shader_ctx *ctx)
2883 {
2884 int r;
2885 struct r600_bytecode_vtx vtx;
2886 int temp_val = ctx->temp_reg;
2887 /* the VFETCH index must come from a GPR, so store 0 into temp_val.x */
2888 r = single_alu_op2(ctx, ALU_OP1_MOV,
2889 temp_val, 0,
2890 V_SQ_ALU_SRC_LITERAL, 0,
2891 0, 0);
2892 if (r)
2893 return r;
2894
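/* layout of R600_LDS_INFO_CONST_BUFFER as consumed here: the vec4 at byte
 * offset 0 describes the TCS input layout, the vec4 at byte offset 16 the
 * TCS output layout */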
2895 /* used by VS/TCS */
2896 if (ctx->tess_input_info) {
2897 /* fetch tcs input values into resv space */
2898 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2899 vtx.op = FETCH_OP_VFETCH;
2900 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2901 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2902 vtx.mega_fetch_count = 16;
2903 vtx.data_format = FMT_32_32_32_32;
2904 vtx.num_format_all = 2;
2905 vtx.format_comp_all = 1;
2906 vtx.use_const_fields = 0;
2907 vtx.endian = r600_endian_swap(32);
2908 vtx.srf_mode_all = 1;
2909 vtx.offset = 0;
2910 vtx.dst_gpr = ctx->tess_input_info;
2911 vtx.dst_sel_x = 0;
2912 vtx.dst_sel_y = 1;
2913 vtx.dst_sel_z = 2;
2914 vtx.dst_sel_w = 3;
2915 vtx.src_gpr = temp_val;
2916 vtx.src_sel_x = 0;
2917
2918 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2919 if (r)
2920 return r;
2921 }
2922
2923 /* used by TCS/TES */
2924 if (ctx->tess_output_info) {
2925 /* fetch tcs output values into resv space */
2926 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2927 vtx.op = FETCH_OP_VFETCH;
2928 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2929 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2930 vtx.mega_fetch_count = 16;
2931 vtx.data_format = FMT_32_32_32_32;
2932 vtx.num_format_all = 2;
2933 vtx.format_comp_all = 1;
2934 vtx.use_const_fields = 0;
2935 vtx.endian = r600_endian_swap(32);
2936 vtx.srf_mode_all = 1;
2937 vtx.offset = 16;
2938 vtx.dst_gpr = ctx->tess_output_info;
2939 vtx.dst_sel_x = 0;
2940 vtx.dst_sel_y = 1;
2941 vtx.dst_sel_z = 2;
2942 vtx.dst_sel_w = 3;
2943 vtx.src_gpr = temp_val;
2944 vtx.src_sel_x = 0;
2945
2946 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2947 if (r)
2948 return r;
2949 }
2950 return 0;
2951 }
2952
2953 static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
2954 {
2955 int j, r;
2956 int temp_reg;
2957 unsigned i;
2958
2959 /* fetch tcs input values into input_vals */
2960 ctx->tess_input_info = r600_get_temp(ctx);
2961 ctx->tess_output_info = 0;
2962 r = r600_fetch_tess_io_info(ctx);
2963 if (r)
2964 return r;
2965
2966 temp_reg = r600_get_temp(ctx);
2967 /* dst reg contains LDS address stride * idx */
2968 /* MUL vertexID, vertex_dw_stride */
2969 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
2970 temp_reg, 0,
2971 ctx->tess_input_info, 1,
2972 0, 1); /* rel id in r0.y? */
2973 if (r)
2974 return r;
2975
2976 for (i = 0; i < ctx->shader->noutput; i++) {
2977 struct r600_bytecode_alu alu;
2978 int param = r600_get_lds_unique_index(ctx->shader->output[i].name, ctx->shader->output[i].sid);
2979
2980 if (param) {
2981 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2982 temp_reg, 1,
2983 temp_reg, 0,
2984 V_SQ_ALU_SRC_LITERAL, param * 16);
2985 if (r)
2986 return r;
2987 }
2988
2989 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2990 temp_reg, 2,
2991 temp_reg, param ? 1 : 0,
2992 V_SQ_ALU_SRC_LITERAL, 8);
2993 if (r)
2994 return r;
2995
2996
2997 for (j = 0; j < 2; j++) {
2998 int chan = (j == 1) ? 2 : (param ? 1 : 0);
2999 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3000 alu.op = LDS_OP3_LDS_WRITE_REL;
3001 alu.src[0].sel = temp_reg;
3002 alu.src[0].chan = chan;
3003 alu.src[1].sel = ctx->shader->output[i].gpr;
3004 alu.src[1].chan = j * 2;
3005 alu.src[2].sel = ctx->shader->output[i].gpr;
3006 alu.src[2].chan = (j * 2) + 1;
3007 alu.last = 1;
3008 alu.dst.chan = 0;
3009 alu.lds_idx = 1;
3010 alu.is_lds_idx_op = true;
3011 r = r600_bytecode_add_alu(ctx->bc, &alu);
3012 if (r)
3013 return r;
3014 }
3015 }
3016 return 0;
3017 }
3018
3019 static int r600_store_tcs_output(struct r600_shader_ctx *ctx)
3020 {
3021 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3022 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
3023 int i, r, lasti;
3024 int temp_reg = r600_get_temp(ctx);
3025 struct r600_bytecode_alu alu;
3026 unsigned write_mask = dst->Register.WriteMask;
3027
3028 if (inst->Dst[0].Register.File != TGSI_FILE_OUTPUT)
3029 return 0;
3030
3031 r = get_lds_offset0(ctx, 1, temp_reg, !dst->Register.Dimension);
3032 if (r)
3033 return r;
3034
3035 /* the base address is now in temp.x */
3036 r = r600_get_byte_address(ctx, temp_reg,
3037 &inst->Dst[0], NULL, ctx->tess_output_info, 1);
3038 if (r)
3039 return r;
3040
3041 /* LDS write */
3042 lasti = tgsi_last_instruction(write_mask);
3043 for (i = 1; i <= lasti; i++) {
3044
3045 if (!(write_mask & (1 << i)))
3046 continue;
3047 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3048 temp_reg, i,
3049 temp_reg, 0,
3050 V_SQ_ALU_SRC_LITERAL, 4 * i);
3051 if (r)
3052 return r;
3053 }
3054
3055 for (i = 0; i <= lasti; i++) {
3056 if (!(write_mask & (1 << i)))
3057 continue;
3058
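/* when both channels of an xy or zw pair are written, store them with a
 * single LDS_WRITE_REL (two values per op) instead of two LDS_WRITEs */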
3059 if ((i == 0 && ((write_mask & 3) == 3)) ||
3060 (i == 2 && ((write_mask & 0xc) == 0xc))) {
3061 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3062 alu.op = LDS_OP3_LDS_WRITE_REL;
3063 alu.src[0].sel = temp_reg;
3064 alu.src[0].chan = i;
3065
3066 alu.src[1].sel = dst->Register.Index;
3067 alu.src[1].sel += ctx->file_offset[dst->Register.File];
3068 alu.src[1].chan = i;
3069
3070 alu.src[2].sel = dst->Register.Index;
3071 alu.src[2].sel += ctx->file_offset[dst->Register.File];
3072 alu.src[2].chan = i + 1;
3073 alu.lds_idx = 1;
3074 alu.dst.chan = 0;
3075 alu.last = 1;
3076 alu.is_lds_idx_op = true;
3077 r = r600_bytecode_add_alu(ctx->bc, &alu);
3078 if (r)
3079 return r;
3080 i += 1;
3081 continue;
3082 }
3083 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3084 alu.op = LDS_OP2_LDS_WRITE;
3085 alu.src[0].sel = temp_reg;
3086 alu.src[0].chan = i;
3087
3088 alu.src[1].sel = dst->Register.Index;
3089 alu.src[1].sel += ctx->file_offset[dst->Register.File];
3090 alu.src[1].chan = i;
3091
3092 alu.src[2].sel = V_SQ_ALU_SRC_0;
3093 alu.dst.chan = 0;
3094 alu.last = 1;
3095 alu.is_lds_idx_op = true;
3096 r = r600_bytecode_add_alu(ctx->bc, &alu);
3097 if (r)
3098 return r;
3099 }
3100 return 0;
3101 }
3102
3103 static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
3104 int output_idx, int nc)
3105 {
3106 int param;
3107 unsigned temp_reg = r600_get_temp(ctx);
3108 unsigned name = ctx->shader->output[output_idx].name;
3109 int dreg = ctx->shader->output[output_idx].gpr;
3110 int r;
3111
3112 param = r600_get_lds_unique_index(name, 0);
3113 r = get_lds_offset0(ctx, 1, temp_reg, true);
3114 if (r)
3115 return r;
3116
3117 if (param) {
3118 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3119 temp_reg, 0,
3120 temp_reg, 0,
3121 V_SQ_ALU_SRC_LITERAL, param * 16);
3122 if (r)
3123 return r;
3124 }
3125
3126 r = do_lds_fetch_values(ctx, temp_reg, dreg, ((1u << nc) - 1));
3127 return r;
3128 }
3129
3130 static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
3131 {
3132 int stride, outer_comps, inner_comps;
3133 int tessinner_idx = -1, tessouter_idx = -1;
3134 int i, r;
3135 unsigned j;
3136 int temp_reg = r600_get_temp(ctx);
3137 int treg[3] = {-1, -1, -1};
3138 struct r600_bytecode_alu alu;
3139 struct r600_bytecode_cf *cf_jump, *cf_pop;
3140
3141 /* only execute factor emission for invocation 0 */
3142 /* PRED_SETE_INT __, R0.z, 0 */
3143 memset(&alu, 0, sizeof(alu));
3144 alu.op = ALU_OP2_PRED_SETE_INT;
3145 alu.src[0].chan = 2;
3146 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3147 alu.execute_mask = 1;
3148 alu.update_pred = 1;
3149 alu.last = 1;
3150 r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
3151
3152 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
3153 cf_jump = ctx->bc->cf_last;
3154
3155 treg[0] = r600_get_temp(ctx);
3156 switch (ctx->shader->tcs_prim_mode) {
3157 case PIPE_PRIM_LINES:
3158 stride = 8; /* 2 dwords, 1 vec2 store */
3159 outer_comps = 2;
3160 inner_comps = 0;
3161 break;
3162 case PIPE_PRIM_TRIANGLES:
3163 stride = 16; /* 4 dwords, 1 vec4 store */
3164 outer_comps = 3;
3165 inner_comps = 1;
3166 treg[1] = r600_get_temp(ctx);
3167 break;
3168 case PIPE_PRIM_QUADS:
3169 stride = 24; /* 6 dwords, 2 stores (vec4 + vec2) */
3170 outer_comps = 4;
3171 inner_comps = 2;
3172 treg[1] = r600_get_temp(ctx);
3173 treg[2] = r600_get_temp(ctx);
3174 break;
3175 default:
3176 assert(0);
3177 return -1;
3178 }
3179
3180 /* R0 is PatchID, RelPatchID, InvocationID, tf_base */
3181 /* TF_WRITE takes index in R.x, value in R.y */
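/* each treg therefore packs two index/value pairs, one in (x,y) and one in
 * (z,w); the GDS writes below select them via src_sel_x/y */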
3182 for (j = 0; j < ctx->shader->noutput; j++) {
3183 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSINNER)
3184 tessinner_idx = j;
3185 if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSOUTER)
3186 tessouter_idx = j;
3187 }
3188
3189 if (tessouter_idx == -1)
3190 return -1;
3191
3192 if (tessinner_idx == -1 && inner_comps)
3193 return -1;
3194
3195 if (tessouter_idx != -1) {
3196 r = r600_tess_factor_read(ctx, tessouter_idx, outer_comps);
3197 if (r)
3198 return r;
3199 }
3200
3201 if (tessinner_idx != -1) {
3202 r = r600_tess_factor_read(ctx, tessinner_idx, inner_comps);
3203 if (r)
3204 return r;
3205 }
3206
3207 /* temp.x = tf_base(r0.w) + relpatchid(r0.y) * tf_stride,
3208 * done with a single MULADD: t.x = r0.y * stride + r0.w */
3212 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
3213 temp_reg, 0,
3214 0, 1,
3215 V_SQ_ALU_SRC_LITERAL, stride,
3216 0, 3);
3217 if (r)
3218 return r;
3219
3220 for (i = 0; i < outer_comps + inner_comps; i++) {
3221 int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
3222 int out_comp = i >= outer_comps ? i - outer_comps : i;
3223
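/* the hardware seems to expect the two isoline outer factors in the
 * opposite order, so swap components 0 and 1 */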
3224 if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
3225 if (out_comp == 1)
3226 out_comp = 0;
3227 else if (out_comp == 0)
3228 out_comp = 1;
3229 }
3230
3231 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
3232 treg[i / 2], (2 * (i % 2)),
3233 temp_reg, 0,
3234 V_SQ_ALU_SRC_LITERAL, 4 * i);
3235 if (r)
3236 return r;
3237 r = single_alu_op2(ctx, ALU_OP1_MOV,
3238 treg[i / 2], 1 + (2 * (i%2)),
3239 ctx->shader->output[out_idx].gpr, out_comp,
3240 0, 0);
3241 if (r)
3242 return r;
3243 }
3244 for (i = 0; i < outer_comps + inner_comps; i++) {
3245 struct r600_bytecode_gds gds;
3246
3247 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
3248 gds.src_gpr = treg[i / 2];
3249 gds.src_sel_x = 2 * (i % 2);
3250 gds.src_sel_y = 1 + (2 * (i % 2));
3251 gds.src_sel_z = 4;
3252 gds.dst_sel_x = 7;
3253 gds.dst_sel_y = 7;
3254 gds.dst_sel_z = 7;
3255 gds.dst_sel_w = 7;
3256 gds.op = FETCH_OP_TF_WRITE;
3257 r = r600_bytecode_add_gds(ctx->bc, &gds);
3258 if (r)
3259 return r;
3260 }
3261
3262 // Patch up jump label
3263 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
3264 cf_pop = ctx->bc->cf_last;
3265
3266 cf_jump->cf_addr = cf_pop->id + 2;
3267 cf_jump->pop_count = 1;
3268 cf_pop->cf_addr = cf_pop->id + 2;
3269 cf_pop->pop_count = 1;
3270
3271 return 0;
3272 }
3273
3274 /*
3275 * We have to work out the thread ID for load and atomic
3276 * operations, which store the returned value to an index
3277 * in an intermediate buffer.
3278 * The thread id within the wave comes from the MBCNT
3279 * instructions; the full index is then
3280 *   (SE_ID * 256 + HW_WAVE_ID) * 64 + thread_id,
3281 * i.e. the shader engine ID is multiplied by 256, the wave
3282 * id is added, the result is multiplied by 64 and the
3283 * thread id is added.
3284 */
3285 static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
3286 {
3287 struct r600_bytecode_alu alu;
3288 int r;
3289
3290 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3291 alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT;
3292 alu.dst.sel = ctx->temp_reg;
3293 alu.dst.chan = 0;
3294 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3295 alu.src[0].value = 0xffffffff;
3296 alu.dst.write = 1;
3297 r = r600_bytecode_add_alu(ctx->bc, &alu);
3298 if (r)
3299 return r;
3300
3301 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3302 alu.op = ALU_OP1_MBCNT_32HI_INT;
3303 alu.dst.sel = ctx->temp_reg;
3304 alu.dst.chan = 1;
3305 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3306 alu.src[0].value = 0xffffffff;
3307 alu.dst.write = 1;
3308 r = r600_bytecode_add_alu(ctx->bc, &alu);
3309 if (r)
3310 return r;
3311
3312 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3313 alu.op = ALU_OP3_MULADD_UINT24;
3314 alu.dst.sel = ctx->temp_reg;
3315 alu.dst.chan = 2;
3316 alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID;
3317 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3318 alu.src[1].value = 256;
3319 alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID;
3320 alu.dst.write = 1;
3321 alu.is_op3 = 1;
3322 alu.last = 1;
3323 r = r600_bytecode_add_alu(ctx->bc, &alu);
3324 if (r)
3325 return r;
3326
3327 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
3328 ctx->thread_id_gpr, 1,
3329 ctx->temp_reg, 2,
3330 V_SQ_ALU_SRC_LITERAL, 0x40,
3331 ctx->temp_reg, 0);
3332 if (r)
3333 return r;
3334 return 0;
3335 }
3336
3337 static int r600_shader_from_tgsi(struct r600_context *rctx,
3338 struct r600_pipe_shader *pipeshader,
3339 union r600_shader_key key)
3340 {
3341 struct r600_screen *rscreen = rctx->screen;
3342 struct r600_shader *shader = &pipeshader->shader;
3343 struct tgsi_token *tokens = pipeshader->selector->tokens;
3344 struct pipe_stream_output_info so = pipeshader->selector->so;
3345 struct tgsi_full_immediate *immediate;
3346 struct r600_shader_ctx ctx;
3347 struct r600_bytecode_output output[ARRAY_SIZE(shader->output)];
3348 unsigned output_done, noutput;
3349 unsigned opcode;
3350 int j, k, r = 0;
3351 unsigned i;
3352 int next_param_base = 0, next_clip_base;
3353 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
3354 bool indirect_gprs;
3355 bool ring_outputs = false;
3356 bool lds_outputs = false;
3357 bool lds_inputs = false;
3358 bool pos_emitted = false;
3359
3360 ctx.bc = &shader->bc;
3361 ctx.shader = shader;
3362
3363 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
3364 rscreen->has_compressed_msaa_texturing);
3365 ctx.tokens = tokens;
3366 tgsi_scan_shader(tokens, &ctx.info);
3367 shader->indirect_files = ctx.info.indirect_files;
3368
3369 int narrays = ctx.info.array_max[TGSI_FILE_TEMPORARY];
3370 ctx.array_infos = calloc(narrays, sizeof(*ctx.array_infos));
3371 ctx.spilled_arrays = calloc(narrays, sizeof(bool));
3372 tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, narrays, ctx.array_infos);
3373
3374 shader->uses_helper_invocation = false;
3375 shader->uses_doubles = ctx.info.uses_doubles;
3376 shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC];
3377 shader->nsys_inputs = 0;
3378
3379 shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0 ||
3380 ctx.info.file_count[TGSI_FILE_BUFFER] > 0;
3381 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
3382 tgsi_parse_init(&ctx.parse, tokens);
3383 ctx.type = ctx.info.processor;
3384 shader->processor_type = ctx.type;
3385 ctx.bc->type = shader->processor_type;
3386
3387 switch (ctx.type) {
3388 case PIPE_SHADER_VERTEX:
3389 shader->vs_as_gs_a = key.vs.as_gs_a;
3390 shader->vs_as_es = key.vs.as_es;
3391 shader->vs_as_ls = key.vs.as_ls;
3392 shader->atomic_base = key.vs.first_atomic_counter;
3393 if (shader->vs_as_es)
3394 ring_outputs = true;
3395 if (shader->vs_as_ls)
3396 lds_outputs = true;
3397 break;
3398 case PIPE_SHADER_GEOMETRY:
3399 ring_outputs = true;
3400 shader->atomic_base = key.gs.first_atomic_counter;
3401 shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix;
3402 break;
3403 case PIPE_SHADER_TESS_CTRL:
3404 shader->tcs_prim_mode = key.tcs.prim_mode;
3405 shader->atomic_base = key.tcs.first_atomic_counter;
3406 lds_outputs = true;
3407 lds_inputs = true;
3408 break;
3409 case PIPE_SHADER_TESS_EVAL:
3410 shader->tes_as_es = key.tes.as_es;
3411 shader->atomic_base = key.tes.first_atomic_counter;
3412 lds_inputs = true;
3413 if (shader->tes_as_es)
3414 ring_outputs = true;
3415 break;
3416 case PIPE_SHADER_FRAGMENT:
3417 shader->two_side = key.ps.color_two_side;
3418 shader->atomic_base = key.ps.first_atomic_counter;
3419 shader->rat_base = key.ps.nr_cbufs;
3420 shader->image_size_const_offset = key.ps.image_size_const_offset;
3421 break;
3422 case PIPE_SHADER_COMPUTE:
3423 shader->rat_base = 0;
3424 shader->image_size_const_offset = ctx.info.file_count[TGSI_FILE_SAMPLER];
3425 break;
3426 default:
3427 break;
3428 }
3429
3430 if (shader->vs_as_es || shader->tes_as_es) {
3431 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
3432 } else {
3433 ctx.gs_for_vs = NULL;
3434 }
3435
3436 ctx.next_ring_offset = 0;
3437 ctx.gs_out_ring_offset = 0;
3438 ctx.gs_next_vertex = 0;
3439 ctx.gs_stream_output_info = &so;
3440
3441 ctx.thread_id_gpr = -1;
3442 ctx.face_gpr = -1;
3443 ctx.fixed_pt_position_gpr = -1;
3444 ctx.fragcoord_input = -1;
3445 ctx.colors_used = 0;
3446 ctx.clip_vertex_write = 0;
3447
3448 ctx.helper_invoc_reg = -1;
3449 ctx.cs_block_size_reg = -1;
3450 ctx.cs_grid_size_reg = -1;
3451 ctx.cs_block_size_loaded = false;
3452 ctx.cs_grid_size_loaded = false;
3453
3454 shader->nr_ps_color_exports = 0;
3455 shader->nr_ps_max_color_exports = 0;
3456
3457
3458 /* register allocations */
3459 /* Values [0,127] correspond to GPR[0..127].
3460 * Values [128,159] correspond to constant buffer bank 0
3461 * Values [160,191] correspond to constant buffer bank 1
3462 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
3463 * Values [256,287] correspond to constant buffer bank 2 (EG)
3464 * Values [288,319] correspond to constant buffer bank 3 (EG)
3465 * Other special values are shown in the list below.
3466 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
3467 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
3468 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
3469 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
3470 * 248 SQ_ALU_SRC_0: special constant 0.0.
3471 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
3472 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
3473 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
3474 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
3475 * 253 SQ_ALU_SRC_LITERAL: literal constant.
3476 * 254 SQ_ALU_SRC_PV: previous vector result.
3477 * 255 SQ_ALU_SRC_PS: previous scalar result.
3478 */
3479 for (i = 0; i < TGSI_FILE_COUNT; i++) {
3480 ctx.file_offset[i] = 0;
3481 }
3482
3483 if (ctx.type == PIPE_SHADER_VERTEX) {
3484
3485 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3486 if (ctx.info.num_inputs)
3487 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
3488 }
3489 if (ctx.type == PIPE_SHADER_FRAGMENT) {
3490 if (ctx.bc->chip_class >= EVERGREEN)
3491 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
3492 else
3493 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
3494
3495 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3496 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_HELPER_INVOCATION) {
3497 ctx.helper_invoc_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3498 shader->uses_helper_invocation = true;
3499 }
3500 }
3501 }
3502 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3503 /* FIXME 1 would be enough in some cases (3 or fewer input vertices) */
3504 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3505 }
3506 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3507 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3508 if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3509 bool add_tesscoord = false, add_tess_inout = false;
3510 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3511 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3512 /* if we have tesscoord save one reg */
3513 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSCOORD)
3514 add_tesscoord = true;
3515 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSINNER ||
3516 ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSOUTER)
3517 add_tess_inout = true;
3518 }
3519 if (add_tesscoord || add_tess_inout)
3520 ctx.file_offset[TGSI_FILE_INPUT]++;
3521 if (add_tess_inout)
3522 ctx.file_offset[TGSI_FILE_INPUT] += 2;
3523 }
3524 if (ctx.type == PIPE_SHADER_COMPUTE) {
3525 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3526 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3527 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_GRID_SIZE)
3528 ctx.cs_grid_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3529 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_BLOCK_SIZE)
3530 ctx.cs_block_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
3531 }
3532 }
3533
3534 ctx.file_offset[TGSI_FILE_OUTPUT] =
3535 ctx.file_offset[TGSI_FILE_INPUT] +
3536 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3537 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
3538 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
3539
3540 /* Outside the GPR range. This will be translated to one of the
3541 * kcache banks later. */
3542 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
3543 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
3544
3545 pipeshader->scratch_space_needed = 0;
3546 int regno = ctx.file_offset[TGSI_FILE_TEMPORARY] +
3547 ctx.info.file_max[TGSI_FILE_TEMPORARY];
3548 if (regno > 124) {
3549 choose_spill_arrays(&ctx, &regno, &pipeshader->scratch_space_needed);
3550 shader->indirect_files = ctx.info.indirect_files;
3551 }
3552 shader->needs_scratch_space = pipeshader->scratch_space_needed != 0;
3553
3554 ctx.bc->ar_reg = ++regno;
3555 ctx.bc->index_reg[0] = ++regno;
3556 ctx.bc->index_reg[1] = ++regno;
3557
3558 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3559 ctx.tess_input_info = ++regno;
3560 ctx.tess_output_info = ++regno;
3561 } else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3562 ctx.tess_input_info = 0;
3563 ctx.tess_output_info = ++regno;
3564 } else if (ctx.type == PIPE_SHADER_GEOMETRY) {
3565 ctx.gs_export_gpr_tregs[0] = ++regno;
3566 ctx.gs_export_gpr_tregs[1] = ++regno;
3567 ctx.gs_export_gpr_tregs[2] = ++regno;
3568 ctx.gs_export_gpr_tregs[3] = ++regno;
3569 if (ctx.shader->gs_tri_strip_adj_fix) {
3570 ctx.gs_rotated_input[0] = ++regno;
3571 ctx.gs_rotated_input[1] = ++regno;
3572 } else {
3573 ctx.gs_rotated_input[0] = 0;
3574 ctx.gs_rotated_input[1] = 1;
3575 }
3576 }
3577
3578 if (shader->uses_images) {
3579 ctx.thread_id_gpr = ++regno;
3580 }
3581 ctx.temp_reg = ++regno;
3582
3583 shader->max_arrays = 0;
3584 shader->num_arrays = 0;
3585 if (indirect_gprs) {
3586
3587 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
3588 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
3589 ctx.file_offset[TGSI_FILE_OUTPUT] -
3590 ctx.file_offset[TGSI_FILE_INPUT],
3591 0x0F);
3592 }
3593 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
3594 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
3595 ctx.file_offset[TGSI_FILE_TEMPORARY] -
3596 ctx.file_offset[TGSI_FILE_OUTPUT],
3597 0x0F);
3598 }
3599 }
3600
3601 ctx.nliterals = 0;
3602 ctx.literals = NULL;
3603 ctx.max_driver_temp_used = 0;
3604
3605 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
3606 ctx.info.colors_written == 1;
3607 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
3608 shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
3609
3610 if (ctx.type == PIPE_SHADER_VERTEX ||
3611 ctx.type == PIPE_SHADER_GEOMETRY ||
3612 ctx.type == PIPE_SHADER_TESS_EVAL) {
3613 shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] +
3614 ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1;
3615 shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1;
3616 shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED];
3617 }
3618
3619 if (shader->vs_as_gs_a)
3620 vs_add_primid_output(&ctx, key.vs.prim_id_out);
3621
3622 if (ctx.thread_id_gpr != -1) {
3623 r = load_thread_id_gpr(&ctx);
3624 if (r)
3625 return r;
3626 }
3627
3628 if (ctx.type == PIPE_SHADER_TESS_EVAL)
3629 r600_fetch_tess_io_info(&ctx);
3630
3631 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3632 tgsi_parse_token(&ctx.parse);
3633 switch (ctx.parse.FullToken.Token.Type) {
3634 case TGSI_TOKEN_TYPE_IMMEDIATE:
3635 immediate = &ctx.parse.FullToken.FullImmediate;
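/* each TGSI immediate is a vec4 of 32-bit words, hence 16 bytes per entry */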
3636 uint32_t *new_literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
3637 if (new_literals == NULL) {
3638 r = -ENOMEM; /* keep the old buffer valid so out_err can free it */
3639 goto out_err;
3640 }
ctx.literals = new_literals;
3641 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
3642 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
3643 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
3644 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
3645 ctx.nliterals++;
3646 break;
3647 case TGSI_TOKEN_TYPE_DECLARATION:
3648 r = tgsi_declaration(&ctx);
3649 if (r)
3650 goto out_err;
3651 break;
3652 case TGSI_TOKEN_TYPE_INSTRUCTION:
3653 case TGSI_TOKEN_TYPE_PROPERTY:
3654 break;
3655 default:
3656 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
3657 r = -EINVAL;
3658 goto out_err;
3659 }
3660 }
3661
3662 shader->ring_item_sizes[0] = ctx.next_ring_offset;
3663 shader->ring_item_sizes[1] = 0;
3664 shader->ring_item_sizes[2] = 0;
3665 shader->ring_item_sizes[3] = 0;
3666
3667 /* Process two-sided colors if needed */
3668 if (shader->two_side && ctx.colors_used) {
3669 int i, count = ctx.shader->ninput;
3670 unsigned next_lds_loc = ctx.shader->nlds;
3671
3672 /* Additional inputs are allocated right after the existing ones.
3673 * They are not needed after the color selection, so there is no need
3674 * to reserve these GPRs for the rest of the shader code or to adjust
3675 * output offsets etc. */
3676 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
3677 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3678
3679 /* if two-sided and neither face nor sample mask is used by the shader, ensure face_gpr is emitted */
3680 if (ctx.face_gpr == -1) {
3681 i = ctx.shader->ninput++;
3682 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
3683 ctx.shader->input[i].spi_sid = 0;
3684 ctx.shader->input[i].gpr = gpr++;
3685 ctx.face_gpr = ctx.shader->input[i].gpr;
3686 }
3687
3688 for (i = 0; i < count; i++) {
3689 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
3690 int ni = ctx.shader->ninput++;
3691 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
3692 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
3693 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
3694 ctx.shader->input[ni].gpr = gpr++;
3695 // TGSI to LLVM needs to know the lds position of inputs.
3696 // The non-LLVM path computes it later (in process_twoside_color_inputs).
3697 ctx.shader->input[ni].lds_pos = next_lds_loc++;
3698 ctx.shader->input[i].back_color_input = ni;
3699 if (ctx.bc->chip_class >= EVERGREEN) {
3700 if ((r = evergreen_interp_input(&ctx, ni)))
3701 return r;
3702 }
3703 }
3704 }
3705 }
3706
3707 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
3708 shader->nr_ps_max_color_exports = 8;
3709
3710 if (ctx.shader->uses_helper_invocation) {
3711 if (ctx.bc->chip_class == CAYMAN)
3712 r = cm_load_helper_invocation(&ctx);
3713 else
3714 r = eg_load_helper_invocation(&ctx);
3715 if (r)
3716 return r;
3717 }
3718
3719 /*
3720 * XXX this relies on fixed_pt_position_gpr only being present when
3721 * this shader should be executed per sample. Should be the case for now...
3722 */
3723 if (ctx.fixed_pt_position_gpr != -1 && ctx.info.reads_samplemask) {
3724 /*
3725 * Fix up sample mask. The hw always gives us coverage mask for
3726 * the pixel. However, for per-sample shading, we need the
3727 * coverage for the shader invocation only.
3728 * Also, with disabled msaa, only the first bit should be set
3729 * (luckily the same fixup works for both problems).
3730 * For now, we can only do it if we know this shader is always
3731 * executed per sample (due to usage of bits in the shader
3732 * forcing per-sample execution).
3733 * If the fb is not multisampled, we'd do unnecessary work but
3734 * it should still be correct.
3735 * It will, however, do nothing for sample shading driven by
3736 * MinSampleShading.
3737 */
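/* Concretely: face_gpr.z &= (1 << sample_id), with the sample id taken
 * from channel 3 of the fixed point position input. E.g. with 4x MSAA
 * and sample 2, a pixel coverage of 0b1111 becomes 0b0100 for this
 * invocation; with MSAA disabled only bit 0 survives. */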
3738 struct r600_bytecode_alu alu;
3739 int tmp = r600_get_temp(&ctx);
3740 assert(ctx.face_gpr != -1);
3741 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3742
3743 alu.op = ALU_OP2_LSHL_INT;
3744 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3745 alu.src[0].value = 0x1;
3746 alu.src[1].sel = ctx.fixed_pt_position_gpr;
3747 alu.src[1].chan = 3;
3748 alu.dst.sel = tmp;
3749 alu.dst.chan = 0;
3750 alu.dst.write = 1;
3751 alu.last = 1;
3752 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3753 return r;
3754
3755 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3756 alu.op = ALU_OP2_AND_INT;
3757 alu.src[0].sel = tmp;
3758 alu.src[1].sel = ctx.face_gpr;
3759 alu.src[1].chan = 2;
3760 alu.dst.sel = ctx.face_gpr;
3761 alu.dst.chan = 2;
3762 alu.dst.write = 1;
3763 alu.last = 1;
3764 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3765 return r;
3766 }
3767
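/* gl_FragCoord.w is defined as 1/w, but the position input delivers the
 * unreciprocated w, so patch channel 3 with its reciprocal. */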
3768 if (ctx.fragcoord_input >= 0) {
3769 if (ctx.bc->chip_class == CAYMAN) {
3770 for (j = 0 ; j < 4; j++) {
3771 struct r600_bytecode_alu alu;
3772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3773 alu.op = ALU_OP1_RECIP_IEEE;
3774 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3775 alu.src[0].chan = 3;
3776
3777 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3778 alu.dst.chan = j;
3779 alu.dst.write = (j == 3);
3780 alu.last = (j == 3);
3781 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3782 return r;
3783 }
3784 } else {
3785 struct r600_bytecode_alu alu;
3786 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3787 alu.op = ALU_OP1_RECIP_IEEE;
3788 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3789 alu.src[0].chan = 3;
3790
3791 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3792 alu.dst.chan = 3;
3793 alu.dst.write = 1;
3794 alu.last = 1;
3795 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3796 return r;
3797 }
3798 }
3799
3800 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3801 struct r600_bytecode_alu alu;
3802 int r;
3803
3804 /* Workaround for GS threads with no output: emit a cut at the start of the GS */
3805 if (ctx.bc->chip_class == R600)
3806 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CUT_VERTEX);
3807
3808 for (j = 0; j < 4; j++) {
3809 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3810 alu.op = ALU_OP1_MOV;
3811 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3812 alu.src[0].value = 0;
3813 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
3814 alu.dst.write = 1;
3815 alu.last = 1;
3816 r = r600_bytecode_add_alu(ctx.bc, &alu);
3817 if (r)
3818 return r;
3819 }
3820
3821 if (ctx.shader->gs_tri_strip_adj_fix) {
3822 r = single_alu_op2(&ctx, ALU_OP2_AND_INT,
3823 ctx.gs_rotated_input[0], 2,
3824 0, 2,
3825 V_SQ_ALU_SRC_LITERAL, 1);
3826 if (r)
3827 return r;
3828
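/* For triangle strips with adjacency, odd primitives come in with a
 * rotated vertex order: pick either the original offset or the one
 * rotated by four, i -> (i + 4) % 6, based on the parity bit computed
 * above. Channel 2 of the first offset register is skipped (remapped
 * to 3) because the vertex offsets do not occupy it. */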
3829 for (i = 0; i < 6; i++) {
3830 int rotated = (i + 4) % 6;
3831 int offset_reg = i / 3;
3832 int offset_chan = i % 3;
3833 int rotated_offset_reg = rotated / 3;
3834 int rotated_offset_chan = rotated % 3;
3835
3836 if (offset_reg == 0 && offset_chan == 2)
3837 offset_chan = 3;
3838 if (rotated_offset_reg == 0 && rotated_offset_chan == 2)
3839 rotated_offset_chan = 3;
3840
3841 r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT,
3842 ctx.gs_rotated_input[offset_reg], offset_chan,
3843 ctx.gs_rotated_input[0], 2,
3844 offset_reg, offset_chan,
3845 rotated_offset_reg, rotated_offset_chan);
3846 if (r)
3847 return r;
3848 }
3849 }
3850 }
3851
3852 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3853 r600_fetch_tess_io_info(&ctx);
3854
3855 if (shader->two_side && ctx.colors_used) {
3856 if ((r = process_twoside_color_inputs(&ctx)))
3857 return r;
3858 }
3859
3860 tgsi_parse_init(&ctx.parse, tokens);
3861 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3862 tgsi_parse_token(&ctx.parse);
3863 switch (ctx.parse.FullToken.Token.Type) {
3864 case TGSI_TOKEN_TYPE_INSTRUCTION:
3865 r = tgsi_is_supported(&ctx);
3866 if (r)
3867 goto out_err;
3868 ctx.max_driver_temp_used = 0;
3869 /* reserve first tmp for everyone */
3870 r600_get_temp(&ctx);
3871
3872 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
3873 if ((r = tgsi_split_constant(&ctx)))
3874 goto out_err;
3875 if ((r = tgsi_split_literal_constant(&ctx)))
3876 goto out_err;
3877 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3878 if ((r = tgsi_split_gs_inputs(&ctx)))
3879 goto out_err;
3880 } else if (lds_inputs) {
3881 if ((r = tgsi_split_lds_inputs(&ctx)))
3882 goto out_err;
3883 }
3884 if (ctx.bc->chip_class == CAYMAN)
3885 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
3886 else if (ctx.bc->chip_class >= EVERGREEN)
3887 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
3888 else
3889 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
3890 r = ctx.inst_info->process(&ctx);
3891 if (r)
3892 goto out_err;
3893
3894 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3895 r = r600_store_tcs_output(&ctx);
3896 if (r)
3897 goto out_err;
3898 }
3899 break;
3900 default:
3901 break;
3902 }
3903 }
3904
3905 /* Reset the temporary register counter. */
3906 ctx.max_driver_temp_used = 0;
3907
3908 noutput = shader->noutput;
3909
3910 if (!ring_outputs && ctx.clip_vertex_write) {
3911 unsigned clipdist_temp[2];
3912
3913 clipdist_temp[0] = r600_get_temp(&ctx);
3914 clipdist_temp[1] = r600_get_temp(&ctx);
3915
3916 /* convert the clipvertex write into clipdistance writes and stop
3917 exporting the clip vertex itself */
3918
3919 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
3920 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3921 shader->output[noutput].gpr = clipdist_temp[0];
3922 noutput++;
3923 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3924 shader->output[noutput].gpr = clipdist_temp[1];
3925 noutput++;
3926
3927 /* reset spi_sid for clipvertex output to avoid confusing spi */
3928 shader->output[ctx.cv_output].spi_sid = 0;
3929
3930 shader->clip_dist_write = 0xFF;
3931 shader->cc_dist_mask = 0xFF;
3932
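/* Each clip distance is dot(clipvertex, user_clip_plane[i]); the planes
 * are read from constants 512 + i of the buffer-info constant buffer and
 * the result lands in channel (i & 3) of clipdist_temp[i >> 2]. */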
3933 for (i = 0; i < 8; i++) {
3934 int oreg = i >> 2;
3935 int ochan = i & 3;
3936
3937 for (j = 0; j < 4; j++) {
3938 struct r600_bytecode_alu alu;
3939 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3940 alu.op = ALU_OP2_DOT4;
3941 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
3942 alu.src[0].chan = j;
3943
3944 alu.src[1].sel = 512 + i;
3945 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
3946 alu.src[1].chan = j;
3947
3948 alu.dst.sel = clipdist_temp[oreg];
3949 alu.dst.chan = j;
3950 alu.dst.write = (j == ochan);
3951 if (j == 3)
3952 alu.last = 1;
3953 r = r600_bytecode_add_alu(ctx.bc, &alu);
3954 if (r)
3955 return r;
3956 }
3957 }
3958 }
3959
3960 /* Add stream outputs. */
3961 if (so.num_outputs) {
3962 bool emit = false;
3963 if (!lds_outputs && !ring_outputs && ctx.type == PIPE_SHADER_VERTEX)
3964 emit = true;
3965 if (!ring_outputs && ctx.type == PIPE_SHADER_TESS_EVAL)
3966 emit = true;
3967 if (emit)
3968 emit_streamout(&ctx, &so, -1, NULL);
3969 }
3970 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
3971 convert_edgeflag_to_int(&ctx);
3972
3973 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3974 r600_emit_tess_factor(&ctx);
3975
3976 if (lds_outputs) {
3977 if (ctx.type == PIPE_SHADER_VERTEX) {
3978 if (ctx.shader->noutput)
3979 emit_lds_vs_writes(&ctx);
3980 }
3981 } else if (ring_outputs) {
3982 if (shader->vs_as_es || shader->tes_as_es) {
3983 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
3984 ctx.gs_export_gpr_tregs[1] = -1;
3985 ctx.gs_export_gpr_tregs[2] = -1;
3986 ctx.gs_export_gpr_tregs[3] = -1;
3987
3988 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
3989 }
3990 } else {
3991 /* Export output */
3992 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
3993
3994 for (i = 0, j = 0; i < noutput; i++, j++) {
3995 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3996 output[j].gpr = shader->output[i].gpr;
3997 output[j].elem_size = 3;
3998 output[j].swizzle_x = 0;
3999 output[j].swizzle_y = 1;
4000 output[j].swizzle_z = 2;
4001 output[j].swizzle_w = 3;
4002 output[j].burst_count = 1;
4003 output[j].type = 0xffffffff;
4004 output[j].op = CF_OP_EXPORT;
4005 switch (ctx.type) {
4006 case PIPE_SHADER_VERTEX:
4007 case PIPE_SHADER_TESS_EVAL:
4008 switch (shader->output[i].name) {
4009 case TGSI_SEMANTIC_POSITION:
4010 output[j].array_base = 60;
4011 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4012 pos_emitted = true;
4013 break;
4014
4015 case TGSI_SEMANTIC_PSIZE:
4016 output[j].array_base = 61;
4017 output[j].swizzle_y = 7;
4018 output[j].swizzle_z = 7;
4019 output[j].swizzle_w = 7;
4020 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4021 pos_emitted = true;
4022 break;
4023 case TGSI_SEMANTIC_EDGEFLAG:
4024 output[j].array_base = 61;
4025 output[j].swizzle_x = 7;
4026 output[j].swizzle_y = 0;
4027 output[j].swizzle_z = 7;
4028 output[j].swizzle_w = 7;
4029 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4030 pos_emitted = true;
4031 break;
4032 case TGSI_SEMANTIC_LAYER:
4033 /* spi_sid is 0 for outputs that are
4034 * not consumed by PS */
4035 if (shader->output[i].spi_sid) {
4036 output[j].array_base = next_param_base++;
4037 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4038 j++;
4039 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4040 }
4041 output[j].array_base = 61;
4042 output[j].swizzle_x = 7;
4043 output[j].swizzle_y = 7;
4044 output[j].swizzle_z = 0;
4045 output[j].swizzle_w = 7;
4046 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4047 pos_emitted = true;
4048 break;
4049 case TGSI_SEMANTIC_VIEWPORT_INDEX:
4050 /* spi_sid is 0 for outputs that are
4051 * not consumed by PS */
4052 if (shader->output[i].spi_sid) {
4053 output[j].array_base = next_param_base++;
4054 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4055 j++;
4056 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4057 }
4058 output[j].array_base = 61;
4059 output[j].swizzle_x = 7;
4060 output[j].swizzle_y = 7;
4061 output[j].swizzle_z = 7;
4062 output[j].swizzle_w = 0;
4063 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4064 pos_emitted = true;
4065 break;
4066 case TGSI_SEMANTIC_CLIPVERTEX:
4067 j--;
4068 break;
4069 case TGSI_SEMANTIC_CLIPDIST:
4070 output[j].array_base = next_clip_base++;
4071 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4072 pos_emitted = true;
4073 /* spi_sid is 0 for clipdistance outputs that were generated
4074 * for clipvertex - we don't need to pass them to PS */
4075 if (shader->output[i].spi_sid) {
4076 j++;
4077 /* duplicate it as PARAM to pass to the pixel shader */
4078 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
4079 output[j].array_base = next_param_base++;
4080 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4081 }
4082 break;
4083 case TGSI_SEMANTIC_FOG:
4084 output[j].swizzle_y = 4; /* 0 */
4085 output[j].swizzle_z = 4; /* 0 */
4086 output[j].swizzle_w = 5; /* 1 */
4087 break;
4088 case TGSI_SEMANTIC_PRIMID:
4089 output[j].swizzle_x = 2;
4090 output[j].swizzle_y = 4; /* 0 */
4091 output[j].swizzle_z = 4; /* 0 */
4092 output[j].swizzle_w = 4; /* 0 */
4093 break;
4094 }
4095
4096 break;
4097 case PIPE_SHADER_FRAGMENT:
4098 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
4099 /* never export more colors than the number of CBs */
4100 if (shader->output[i].sid >= max_color_exports) {
4101 /* skip export */
4102 j--;
4103 continue;
4104 }
4105 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
4106 output[j].array_base = shader->output[i].sid;
4107 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4108 shader->nr_ps_color_exports++;
4109 shader->ps_color_export_mask |= (0xf << (shader->output[i].sid * 4));
4110
4111 /* If the i-th target format is set, all previous target formats must
4112 * be non-zero to avoid hangs. - from radeonsi, seems to apply to eg as well.
4113 */
4114 if (shader->output[i].sid > 0)
4115 for (unsigned x = 0; x < shader->output[i].sid; x++)
4116 shader->ps_color_export_mask |= (1 << (x*4));
4117
4118 if (shader->output[i].sid > shader->ps_export_highest)
4119 shader->ps_export_highest = shader->output[i].sid;
4120 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
4121 for (k = 1; k < max_color_exports; k++) {
4122 j++;
4123 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4124 output[j].gpr = shader->output[i].gpr;
4125 output[j].elem_size = 3;
4126 output[j].swizzle_x = 0;
4127 output[j].swizzle_y = 1;
4128 output[j].swizzle_z = 2;
4129 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
4130 output[j].burst_count = 1;
4131 output[j].array_base = k;
4132 output[j].op = CF_OP_EXPORT;
4133 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4134 shader->nr_ps_color_exports++;
4135 if (k > shader->ps_export_highest)
4136 shader->ps_export_highest = k;
4137 shader->ps_color_export_mask |= (0xf << (j * 4));
4138 }
4139 }
4140 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
4141 output[j].array_base = 61;
4142 output[j].swizzle_x = 2;
4143 output[j].swizzle_y = 7;
4144 output[j].swizzle_z = output[j].swizzle_w = 7;
4145 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4146 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
4147 output[j].array_base = 61;
4148 output[j].swizzle_x = 7;
4149 output[j].swizzle_y = 1;
4150 output[j].swizzle_z = output[j].swizzle_w = 7;
4151 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4152 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
4153 output[j].array_base = 61;
4154 output[j].swizzle_x = 7;
4155 output[j].swizzle_y = 7;
4156 output[j].swizzle_z = 0;
4157 output[j].swizzle_w = 7;
4158 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4159 } else {
4160 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
4161 r = -EINVAL;
4162 goto out_err;
4163 }
4164 break;
4165 case PIPE_SHADER_TESS_CTRL:
4166 break;
4167 default:
4168 R600_ERR("unsupported processor type %d\n", ctx.type);
4169 r = -EINVAL;
4170 goto out_err;
4171 }
4172
4173 if (output[j].type == 0xffffffff) {
4174 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4175 output[j].array_base = next_param_base++;
4176 }
4177 }
4178
4179 /* add fake position export */
4180 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && pos_emitted == false) {
4181 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4182 output[j].gpr = 0;
4183 output[j].elem_size = 3;
4184 output[j].swizzle_x = 7;
4185 output[j].swizzle_y = 7;
4186 output[j].swizzle_z = 7;
4187 output[j].swizzle_w = 7;
4188 output[j].burst_count = 1;
4189 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
4190 output[j].array_base = 60;
4191 output[j].op = CF_OP_EXPORT;
4192 j++;
4193 }
4194
4195 /* add fake param output for vertex shader if no param is exported */
4196 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && next_param_base == 0) {
4197 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4198 output[j].gpr = 0;
4199 output[j].elem_size = 3;
4200 output[j].swizzle_x = 7;
4201 output[j].swizzle_y = 7;
4202 output[j].swizzle_z = 7;
4203 output[j].swizzle_w = 7;
4204 output[j].burst_count = 1;
4205 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
4206 output[j].array_base = 0;
4207 output[j].op = CF_OP_EXPORT;
4208 j++;
4209 }
4210
4211 /* add fake pixel export */
4212 if (ctx.type == PIPE_SHADER_FRAGMENT && shader->nr_ps_color_exports == 0) {
4213 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
4214 output[j].gpr = 0;
4215 output[j].elem_size = 3;
4216 output[j].swizzle_x = 7;
4217 output[j].swizzle_y = 7;
4218 output[j].swizzle_z = 7;
4219 output[j].swizzle_w = 7;
4220 output[j].burst_count = 1;
4221 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
4222 output[j].array_base = 0;
4223 output[j].op = CF_OP_EXPORT;
4224 j++;
4225 shader->nr_ps_color_exports++;
4226 shader->ps_color_export_mask = 0xf;
4227 }
4228
4229 noutput = j;
4230
4231 /* set export done on last export of each type */
4232 for (k = noutput - 1, output_done = 0; k >= 0; k--) {
4233 if (!(output_done & (1 << output[k].type))) {
4234 output_done |= (1 << output[k].type);
4235 output[k].op = CF_OP_EXPORT_DONE;
4236 }
4237 }
4238 /* add output to bytecode */
4239 for (i = 0; i < noutput; i++) {
4240 r = r600_bytecode_add_output(ctx.bc, &output[i]);
4241 if (r)
4242 goto out_err;
4243 }
4244 }
4245
4246 /* add program end */
4247 if (ctx.bc->chip_class == CAYMAN)
4248 cm_bytecode_add_cf_end(ctx.bc);
4249 else {
4250 const struct cf_op_info *last = NULL;
4251
4252 if (ctx.bc->cf_last)
4253 last = r600_isa_cf(ctx.bc->cf_last->op);
4254
4255 /* alu clause instructions don't have EOP bit, so add NOP */
4256 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_POP)
4257 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
4258
4259 ctx.bc->cf_last->end_of_program = 1;
4260 }
4261
4262 /* check GPR limit - we have 124 = 128 - 4
4263 * (4 are reserved as alu clause temporary registers) */
4264 if (ctx.bc->ngpr > 124) {
4265 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
4266 r = -ENOMEM;
4267 goto out_err;
4268 }
4269
4270 if (ctx.type == PIPE_SHADER_GEOMETRY) {
4271 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
4272 return r;
4273 }
4274
4275 free(ctx.spilled_arrays);
4276 free(ctx.array_infos);
4277 free(ctx.literals);
4278 tgsi_parse_free(&ctx.parse);
4279 return 0;
4280 out_err:
4281 free(ctx.spilled_arrays);
4282 free(ctx.array_infos);
4283 free(ctx.literals);
4284 tgsi_parse_free(&ctx.parse);
4285 return r;
4286 }
4287
4288 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
4289 {
4290 const unsigned tgsi_opcode =
4291 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
4292 R600_ERR("%s tgsi opcode unsupported\n",
4293 tgsi_get_opcode_name(tgsi_opcode));
4294 return -EINVAL;
4295 }
4296
4297 static int tgsi_end(struct r600_shader_ctx *ctx UNUSED)
4298 {
4299 return 0;
4300 }
4301
4302 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
4303 const struct r600_shader_src *shader_src,
4304 unsigned chan)
4305 {
4306 bc_src->sel = shader_src->sel;
4307 bc_src->chan = shader_src->swizzle[chan];
4308 bc_src->neg = shader_src->neg;
4309 bc_src->abs = shader_src->abs;
4310 bc_src->rel = shader_src->rel;
4311 bc_src->value = shader_src->value[bc_src->chan];
4312 bc_src->kc_bank = shader_src->kc_bank;
4313 bc_src->kc_rel = shader_src->kc_rel;
4314 }
4315
4316 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
4317 {
4318 bc_src->abs = 1;
4319 bc_src->neg = 0;
4320 }
4321
4322 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
4323 {
4324 bc_src->neg = !bc_src->neg;
4325 }
4326
4327 static void tgsi_dst(struct r600_shader_ctx *ctx,
4328 const struct tgsi_full_dst_register *tgsi_dst,
4329 unsigned swizzle,
4330 struct r600_bytecode_alu_dst *r600_dst)
4331 {
4332 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4333
4334 if (tgsi_dst->Register.File == TGSI_FILE_TEMPORARY) {
4335 bool spilled;
4336 unsigned idx;
4337
4338 idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_dst->Register.Index, &spilled);
4339
4340 if (spilled) {
4341 struct r600_bytecode_output cf;
4342 int reg = 0;
4343 int r;
4344 bool add_pending_output = true;
4345
4346 memset(&cf, 0, sizeof(struct r600_bytecode_output));
4347 get_spilled_array_base_and_size(ctx, tgsi_dst->Register.Index,
4348 &cf.array_base, &cf.array_size);
4349
4350 /* If nothing has spilled yet, reserve a register and add the spill code;
4351 * ctx->bc->n_pending_outputs is cleared after each instruction group */
4352 if (ctx->bc->n_pending_outputs == 0) {
4353 reg = r600_get_temp(ctx);
4354 } else {
4355 /* If we are already spilling and the output address is the same as
4356 * before, just reuse the same slot */
4357 struct r600_bytecode_output *tmpl = &ctx->bc->pending_outputs[ctx->bc->n_pending_outputs-1];
4358 if ((cf.array_base + idx == tmpl->array_base) ||
4359 (cf.array_base == tmpl->array_base &&
4360 tmpl->index_gpr == ctx->bc->ar_reg &&
4361 tgsi_dst->Register.Indirect)) {
4362 reg = ctx->bc->pending_outputs[0].gpr;
4363 add_pending_output = false;
4364 } else {
4365 reg = r600_get_temp(ctx);
4366 }
4367 }
4368
4369 r600_dst->sel = reg;
4370 r600_dst->chan = swizzle;
4371 r600_dst->write = 1;
4372 if (inst->Instruction.Saturate) {
4373 r600_dst->clamp = 1;
4374 }
4375
4376 /* Add new outputs as pending */
4377 if (add_pending_output) {
4378 cf.op = CF_OP_MEM_SCRATCH;
4379 cf.elem_size = 3;
4380 cf.gpr = reg;
4381 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
4382 cf.mark = 1;
4383 cf.comp_mask = inst->Dst[0].Register.WriteMask;
4384 cf.swizzle_x = 0;
4385 cf.swizzle_y = 1;
4386 cf.swizzle_z = 2;
4387 cf.swizzle_w = 3;
4388 cf.burst_count = 1;
4389
4390 if (tgsi_dst->Register.Indirect) {
4391 if (ctx->bc->chip_class < R700)
4392 cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
4393 else
4394 cf.type = 3; // V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND_ACK;
4395 cf.index_gpr = ctx->bc->ar_reg;
4396 }
4397 else {
4398 cf.array_base += idx;
4399 cf.array_size = 0;
4400 }
4401
4402 r = r600_bytecode_add_pending_output(ctx->bc, &cf);
4403 if (r)
4404 return;
4405
4406 if (ctx->bc->chip_class >= R700)
4407 r600_bytecode_need_wait_ack(ctx->bc, true);
4408 }
4409 return;
4410 }
4411 else {
4412 r600_dst->sel = idx;
4413 }
4414 }
4415 else {
4416 r600_dst->sel = tgsi_dst->Register.Index;
4417 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
4418 }
4419 r600_dst->chan = swizzle;
4420 r600_dst->write = 1;
4421 if (inst->Instruction.Saturate) {
4422 r600_dst->clamp = 1;
4423 }
4424 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
4425 if (tgsi_dst->Register.File == TGSI_FILE_OUTPUT) {
4426 return;
4427 }
4428 }
4429 if (tgsi_dst->Register.Indirect)
4430 r600_dst->rel = V_SQ_REL_RELATIVE;
4431
4432 }
4433
4434 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap, int dest_temp, int op_override)
4435 {
4436 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4437 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4438 struct r600_bytecode_alu alu;
4439 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4440 int use_tmp = 0;
4441 int swizzle_x = inst->Src[0].Register.SwizzleX;
4442
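/* A double occupies a channel pair, so a single-channel writemask must
 * be widened to the pair (xy or zw) that holds the source pair. When
 * use_tmp is set, the result is built in ctx->temp_reg first and channel
 * (use_tmp - 1) is copied to the real destination afterwards. */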
4443 if (singledest) {
4444 switch (write_mask) {
4445 case 0x1:
4446 if (swizzle_x == 2) {
4447 write_mask = 0xc;
4448 use_tmp = 3;
4449 } else
4450 write_mask = 0x3;
4451 break;
4452 case 0x2:
4453 if (swizzle_x == 2) {
4454 write_mask = 0xc;
4455 use_tmp = 3;
4456 } else {
4457 write_mask = 0x3;
4458 use_tmp = 1;
4459 }
4460 break;
4461 case 0x4:
4462 if (swizzle_x == 0) {
4463 write_mask = 0x3;
4464 use_tmp = 1;
4465 } else
4466 write_mask = 0xc;
4467 break;
4468 case 0x8:
4469 if (swizzle_x == 0) {
4470 write_mask = 0x3;
4471 use_tmp = 1;
4472 } else {
4473 write_mask = 0xc;
4474 use_tmp = 3;
4475 }
4476 break;
4477 }
4478 }
4479
4480 lasti = tgsi_last_instruction(write_mask);
4481 for (i = 0; i <= lasti; i++) {
4482
4483 if (!(write_mask & (1 << i)))
4484 continue;
4485
4486 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4487
4488 if (singledest) {
4489 if (use_tmp || dest_temp) {
4490 alu.dst.sel = use_tmp ? ctx->temp_reg : dest_temp;
4491 alu.dst.chan = i;
4492 alu.dst.write = 1;
4493 } else {
4494 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4495 }
4496 if (i == 1 || i == 3)
4497 alu.dst.write = 0;
4498 } else
4499 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4500
4501 alu.op = op_override ? op_override : ctx->inst_info->op;
4502 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
4503 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4504 } else if (!swap) {
4505 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4506 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4507 }
4508 } else {
4509 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
4510 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
4511 }
4512
4513 /* handle some special cases */
4514 if (i == 1 || i == 3) {
4515 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
4516 case TGSI_OPCODE_DABS:
4517 r600_bytecode_src_set_abs(&alu.src[0]);
4518 break;
4519 default:
4520 break;
4521 }
4522 }
4523 if (i == lasti) {
4524 alu.last = 1;
4525 }
4526 r = r600_bytecode_add_alu(ctx->bc, &alu);
4527 if (r)
4528 return r;
4529 }
4530
4531 if (use_tmp) {
4532 write_mask = inst->Dst[0].Register.WriteMask;
4533
4534 lasti = tgsi_last_instruction(write_mask);
4535 /* move result from temp to dst */
4536 for (i = 0; i <= lasti; i++) {
4537 if (!(write_mask & (1 << i)))
4538 continue;
4539
4540 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4541 alu.op = ALU_OP1_MOV;
4542
4543 if (dest_temp) {
4544 alu.dst.sel = dest_temp;
4545 alu.dst.chan = i;
4546 alu.dst.write = 1;
4547 } else
4548 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4549 alu.src[0].sel = ctx->temp_reg;
4550 alu.src[0].chan = use_tmp - 1;
4551 alu.last = (i == lasti);
4552
4553 r = r600_bytecode_add_alu(ctx->bc, &alu);
4554 if (r)
4555 return r;
4556 }
4557 }
4558 return 0;
4559 }
4560
4561 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
4562 {
4563 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4564 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4565 /* validate the writemask: 64-bit ops must write whole channel pairs */
4566 if ((write_mask & 0x3) != 0x3 &&
4567 (write_mask & 0xc) != 0xc) {
4568 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
4569 return -1;
4570 }
4571 return tgsi_op2_64_params(ctx, false, false, 0, 0);
4572 }
4573
4574 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
4575 {
4576 return tgsi_op2_64_params(ctx, true, false, 0, 0);
4577 }
4578
4579 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
4580 {
4581 return tgsi_op2_64_params(ctx, true, true, 0, 0);
4582 }
4583
4584 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
4585 {
4586 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4587 struct r600_bytecode_alu alu;
4588 int i, j, r;
4589 int lasti = 3;
4590 int tmp = r600_get_temp(ctx);
4591
4592 for (i = 0; i < lasti + 1; i++) {
4593
4594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4595 alu.op = ctx->inst_info->op;
4596 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4597 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
4598 }
4599
4600 if (inst->Dst[0].Register.WriteMask & (1 << i))
4601 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4602 else
4603 alu.dst.sel = tmp;
4604
4605 alu.dst.chan = i;
4606 alu.is_op3 = 1;
4607 if (i == lasti) {
4608 alu.last = 1;
4609 }
4610 r = r600_bytecode_add_alu(ctx->bc, &alu);
4611 if (r)
4612 return r;
4613 }
4614 return 0;
4615 }
4616
4617 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
4618 {
4619 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4620 struct r600_bytecode_alu alu;
4621 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4622 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4623 /* use temp register if trans_only and more than one dst component */
4624 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
4625 unsigned op = ctx->inst_info->op;
4626
4627 if (op == ALU_OP2_MUL_IEEE &&
4628 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
4629 op = ALU_OP2_MUL;
4630
4631 for (i = 0; i <= lasti; i++) {
4632 if (!(write_mask & (1 << i)))
4633 continue;
4634
4635 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4636 if (use_tmp) {
4637 alu.dst.sel = ctx->temp_reg;
4638 alu.dst.chan = i;
4639 alu.dst.write = 1;
4640 } else
4641 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4642
4643 alu.op = op;
4644 if (!swap) {
4645 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4646 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
4647 }
4648 } else {
4649 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4650 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4651 }
4652 if (i == lasti || trans_only) {
4653 alu.last = 1;
4654 }
4655 r = r600_bytecode_add_alu(ctx->bc, &alu);
4656 if (r)
4657 return r;
4658 }
4659
4660 if (use_tmp) {
4661 /* move result from temp to dst */
4662 for (i = 0; i <= lasti; i++) {
4663 if (!(write_mask & (1 << i)))
4664 continue;
4665
4666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4667 alu.op = ALU_OP1_MOV;
4668 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4669 alu.src[0].sel = ctx->temp_reg;
4670 alu.src[0].chan = i;
4671 alu.last = (i == lasti);
4672
4673 r = r600_bytecode_add_alu(ctx->bc, &alu);
4674 if (r)
4675 return r;
4676 }
4677 }
4678 return 0;
4679 }
4680
4681 static int tgsi_op2(struct r600_shader_ctx *ctx)
4682 {
4683 return tgsi_op2_s(ctx, 0, 0);
4684 }
4685
4686 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
4687 {
4688 return tgsi_op2_s(ctx, 1, 0);
4689 }
4690
4691 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
4692 {
4693 return tgsi_op2_s(ctx, 0, 1);
4694 }
4695
4696 static int tgsi_ineg(struct r600_shader_ctx *ctx)
4697 {
4698 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4699 struct r600_bytecode_alu alu;
4700 int i, r;
4701 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4702
4703 for (i = 0; i < lasti + 1; i++) {
4704
4705 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4706 continue;
4707 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4708 alu.op = ctx->inst_info->op;
4709
4710 alu.src[0].sel = V_SQ_ALU_SRC_0;
4711
4712 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4713
4714 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4715
4716 if (i == lasti) {
4717 alu.last = 1;
4718 }
4719 r = r600_bytecode_add_alu(ctx->bc, &alu);
4720 if (r)
4721 return r;
4722 }
4723 return 0;
4724
4725 }
4726
4727 static int tgsi_dneg(struct r600_shader_ctx *ctx)
4728 {
4729 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4730 struct r600_bytecode_alu alu;
4731 int i, r;
4732 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4733
4734 for (i = 0; i < lasti + 1; i++) {
4735
4736 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4737 continue;
4738 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4739 alu.op = ALU_OP1_MOV;
4740
4741 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4742
4743 if (i == 1 || i == 3)
4744 r600_bytecode_src_toggle_neg(&alu.src[0]);
4745 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4746
4747 if (i == lasti) {
4748 alu.last = 1;
4749 }
4750 r = r600_bytecode_add_alu(ctx->bc, &alu);
4751 if (r)
4752 return r;
4753 }
4754 return 0;
4755
4756 }
4757
4758 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
4759 {
4760 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4761 struct r600_bytecode_alu alu;
4762 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4763 int i, j, r;
4764
4765 for (i = 0; i <= 3; i++) {
4766 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4767 alu.op = ctx->inst_info->op;
4768
4769 alu.dst.sel = ctx->temp_reg;
4770 alu.dst.chan = i;
4771 alu.dst.write = 1;
4772 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4773 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4774 }
4775
4776 if (i == 3)
4777 alu.last = 1;
4778
4779 r = r600_bytecode_add_alu(ctx->bc, &alu);
4780 if (r)
4781 return r;
4782 }
4783
4784 /* Replicate significand result across channels. */
4785 for (i = 0; i <= 3; i++) {
4786 if (!(write_mask & (1 << i)))
4787 continue;
4788
4789 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4790 alu.op = ALU_OP1_MOV;
4791 alu.src[0].chan = (i & 1) + 2;
4792 alu.src[0].sel = ctx->temp_reg;
4793
4794 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4795 alu.dst.write = 1;
4796 alu.last = 1;
4797 r = r600_bytecode_add_alu(ctx->bc, &alu);
4798 if (r)
4799 return r;
4800 }
4801
4802 for (i = 0; i <= 3; i++) {
4803 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
4804 /* Move the exponent (temp channel 1) into the first enabled channel of dst1 */
4805 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4806 alu.op = ALU_OP1_MOV;
4807 alu.src[0].chan = 1;
4808 alu.src[0].sel = ctx->temp_reg;
4809
4810 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
4811 alu.last = 1;
4812 r = r600_bytecode_add_alu(ctx->bc, &alu);
4813 if (r)
4814 return r;
4815 break;
4816 }
4817 }
4818 return 0;
4819 }
4820
4821
4822 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
4823 {
4824 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4825 struct r600_bytecode_alu alu;
4826 int i, c, r;
4827 int write_mask = inst->Dst[0].Register.WriteMask;
4828 int temp_reg = r600_get_temp(ctx);
4829
4830 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
4831 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
4832
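/* A 32-bit int is not always exactly representable in a float (24-bit
 * mantissa), so split it into a high 24-bit part (& 0xffffff00) and a
 * low 8-bit part (& 0xff). Each part converts to float exactly, and
 * widening both to double and summing them with ADD_64 reconstructs the
 * value without rounding. */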
4833 for (c = 0; c < 2; c++) {
4834 int dchan = c * 2;
4835 if (write_mask & (0x3 << dchan)) {
4836 /* split into a high 24-bit part and a low 8-bit part */
4837 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4838 alu.op = ALU_OP2_AND_INT;
4839 alu.dst.sel = temp_reg;
4840 alu.dst.chan = dchan;
4841 r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
4842 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4843 alu.src[1].value = 0xffffff00;
4844 alu.dst.write = 1;
4845 r = r600_bytecode_add_alu(ctx->bc, &alu);
4846 if (r)
4847 return r;
4848
4849 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4850 alu.op = ALU_OP2_AND_INT;
4851 alu.dst.sel = temp_reg;
4852 alu.dst.chan = dchan + 1;
4853 r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
4854 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4855 alu.src[1].value = 0xff;
4856 alu.dst.write = 1;
4857 alu.last = 1;
4858 r = r600_bytecode_add_alu(ctx->bc, &alu);
4859 if (r)
4860 return r;
4861 }
4862 }
4863
4864 for (c = 0; c < 2; c++) {
4865 int dchan = c * 2;
4866 if (write_mask & (0x3 << dchan)) {
4867 for (i = dchan; i <= dchan + 1; i++) {
4868 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4869 alu.op = i == dchan ? ctx->inst_info->op : ALU_OP1_UINT_TO_FLT;
4870
4871 alu.src[0].sel = temp_reg;
4872 alu.src[0].chan = i;
4873 alu.dst.sel = temp_reg;
4874 alu.dst.chan = i;
4875 alu.dst.write = 1;
4876 if (ctx->bc->chip_class == CAYMAN)
4877 alu.last = i == dchan + 1;
4878 else
4879 alu.last = 1; /* trans only ops on evergreen */
4880
4881 r = r600_bytecode_add_alu(ctx->bc, &alu);
4882 if (r)
4883 return r;
4884 }
4885 }
4886 }
4887
4888 for (c = 0; c < 2; c++) {
4889 int dchan = c * 2;
4890 if (write_mask & (0x3 << dchan)) {
4891 for (i = 0; i < 4; i++) {
4892 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4893 alu.op = ALU_OP1_FLT32_TO_FLT64;
4894
4895 alu.src[0].chan = dchan + (i / 2);
4896 if (i == 0 || i == 2)
4897 alu.src[0].sel = temp_reg;
4898 else {
4899 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
4900 alu.src[0].value = 0x0;
4901 }
4902 alu.dst.sel = ctx->temp_reg;
4903 alu.dst.chan = i;
4904 alu.last = i == 3;
4905 alu.dst.write = 1;
4906
4907 r = r600_bytecode_add_alu(ctx->bc, &alu);
4908 if (r)
4909 return r;
4910 }
4911
4912 for (i = 0; i <= 1; i++) {
4913 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4914 alu.op = ALU_OP2_ADD_64;
4915
4916 alu.src[0].chan = fp64_switch(i);
4917 alu.src[0].sel = ctx->temp_reg;
4918
4919 alu.src[1].chan = fp64_switch(i + 2);
4920 alu.src[1].sel = ctx->temp_reg;
4921 tgsi_dst(ctx, &inst->Dst[0], dchan + i, &alu.dst);
4922 alu.last = i == 1;
4923
4924 r = r600_bytecode_add_alu(ctx->bc, &alu);
4925 if (r)
4926 return r;
4927 }
4928 }
4929 }
4930
4931 return 0;
4932 }
4933
4934 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
4935 {
4936 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4937 struct r600_bytecode_alu alu;
4938 int i, r;
4939 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4940 int treg = r600_get_temp(ctx);
4941 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
4942 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
4943
4944 /* do a 64->32 into a temp register */
4945 r = tgsi_op2_64_params(ctx, true, false, treg, ALU_OP1_FLT64_TO_FLT32);
4946 if (r)
4947 return r;
4948
4949 for (i = 0; i <= lasti; i++) {
4950 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4951 continue;
4952 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4953 alu.op = ctx->inst_info->op;
4954
4955 alu.src[0].chan = i;
4956 alu.src[0].sel = treg;
4957 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4958 alu.last = (i == lasti);
4959
4960 r = r600_bytecode_add_alu(ctx->bc, &alu);
4961 if (r)
4962 return r;
4963 }
4964
4965 return 0;
4966 }
4967
4968 static int cayman_emit_unary_double_raw(struct r600_bytecode *bc,
4969 unsigned op,
4970 int dst_reg,
4971 struct r600_shader_src *src,
4972 bool abs)
4973 {
4974 struct r600_bytecode_alu alu;
4975 const int last_slot = 3;
4976 int r;
4977
4978 /* these have to write the result to X/Y by the looks of it */
4979 for (int i = 0 ; i < last_slot; i++) {
4980 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4981 alu.op = op;
4982
4983 r600_bytecode_src(&alu.src[0], src, 1);
4984 r600_bytecode_src(&alu.src[1], src, 0);
4985
4986 if (abs)
4987 r600_bytecode_src_set_abs(&alu.src[1]);
4988
4989 alu.dst.sel = dst_reg;
4990 alu.dst.chan = i;
4991 alu.dst.write = (i == 0 || i == 1);
4992
4993 if (bc->chip_class != CAYMAN || i == last_slot - 1)
4994 alu.last = 1;
4995 r = r600_bytecode_add_alu(bc, &alu);
4996 if (r)
4997 return r;
4998 }
4999
5000 return 0;
5001 }
5002
5003 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
5004 {
5005 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5006 int i, r;
5007 struct r600_bytecode_alu alu;
5008 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5009 int t1 = ctx->temp_reg;
5010
5011 /* there should be only one src reg */
5012 assert(inst->Instruction.NumSrcRegs == 1);
5013
5014 /* only support one double at a time */
5015 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5016 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5017
5018 r = cayman_emit_unary_double_raw(
5019 ctx->bc, ctx->inst_info->op, t1,
5020 &ctx->src[0],
5021 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
5022 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT);
5023 if (r)
5024 return r;
5025
5026 for (i = 0 ; i <= lasti; i++) {
5027 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5028 continue;
5029 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5030 alu.op = ALU_OP1_MOV;
5031 alu.src[0].sel = t1;
5032 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
5033 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5034 alu.dst.write = 1;
5035 if (i == lasti)
5036 alu.last = 1;
5037 r = r600_bytecode_add_alu(ctx->bc, &alu);
5038 if (r)
5039 return r;
5040 }
5041 return 0;
5042 }
5043
5044 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
5045 {
5046 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5047 int i, j, r;
5048 struct r600_bytecode_alu alu;
5049 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5050
5051 for (i = 0 ; i < last_slot; i++) {
5052 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5053 alu.op = ctx->inst_info->op;
5054 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5055 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
5056
5057 /* RSQ should take the absolute value of src */
5058 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
5059 r600_bytecode_src_set_abs(&alu.src[j]);
5060 }
5061 }
5062 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5063 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5064
5065 if (i == last_slot - 1)
5066 alu.last = 1;
5067 r = r600_bytecode_add_alu(ctx->bc, &alu);
5068 if (r)
5069 return r;
5070 }
5071 return 0;
5072 }
5073
5074 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
5075 {
5076 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5077 int i, j, k, r;
5078 struct r600_bytecode_alu alu;
5079 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5080 int t1 = ctx->temp_reg;
5081
5082 for (k = 0; k <= lasti; k++) {
5083 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
5084 continue;
5085
5086 for (i = 0 ; i < 4; i++) {
5087 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5088 alu.op = ctx->inst_info->op;
5089 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5090 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
5091 }
5092 alu.dst.sel = t1;
5093 alu.dst.chan = i;
5094 alu.dst.write = (i == k);
5095 if (i == 3)
5096 alu.last = 1;
5097 r = r600_bytecode_add_alu(ctx->bc, &alu);
5098 if (r)
5099 return r;
5100 }
5101 }
5102
5103 for (i = 0 ; i <= lasti; i++) {
5104 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5105 continue;
5106 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5107 alu.op = ALU_OP1_MOV;
5108 alu.src[0].sel = t1;
5109 alu.src[0].chan = i;
5110 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5111 alu.dst.write = 1;
5112 if (i == lasti)
5113 alu.last = 1;
5114 r = r600_bytecode_add_alu(ctx->bc, &alu);
5115 if (r)
5116 return r;
5117 }
5118
5119 return 0;
5120 }
5121
5122
5123 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
5124 {
5125 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5126 int i, j, k, r;
5127 struct r600_bytecode_alu alu;
5128 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5129 int t1 = ctx->temp_reg;
5130
5131 /* t1 would get overwritten below if we actually tried to
5132 * multiply two pairs of doubles at a time. */
5133 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5134 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5135
5136 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
5137
5138 for (i = 0; i < 4; i++) {
5139 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5140 alu.op = ctx->inst_info->op;
5141 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5142 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
5143 }
5144 alu.dst.sel = t1;
5145 alu.dst.chan = i;
5146 alu.dst.write = 1;
5147 if (i == 3)
5148 alu.last = 1;
5149 r = r600_bytecode_add_alu(ctx->bc, &alu);
5150 if (r)
5151 return r;
5152 }
5153
5154 for (i = 0; i <= lasti; i++) {
5155 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5156 continue;
5157 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5158 alu.op = ALU_OP1_MOV;
5159 alu.src[0].sel = t1;
5160 alu.src[0].chan = i;
5161 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5162 alu.dst.write = 1;
5163 if (i == lasti)
5164 alu.last = 1;
5165 r = r600_bytecode_add_alu(ctx->bc, &alu);
5166 if (r)
5167 return r;
5168 }
5169
5170 return 0;
5171 }
5172
5173 /*
5174 * Emit RECIP_64 + MUL_64 to implement division.
5175 */
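/* i.e. dst = src0 * RECIP_64(src1): the reciprocal is built in t1 and
 * the MUL_64 loop multiplies it back with the dividend. */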
5176 static int cayman_ddiv_instr(struct r600_shader_ctx *ctx)
5177 {
5178 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5179 int r;
5180 struct r600_bytecode_alu alu;
5181 int t1 = ctx->temp_reg;
5182 int k;
5183
5184 /* Only support one double at a time. This is the same constraint as
5185 * in DMUL lowering. */
5186 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
5187 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
5188
5189 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
5190
5191 r = cayman_emit_unary_double_raw(ctx->bc, ALU_OP2_RECIP_64, t1, &ctx->src[1], false);
5192 if (r)
5193 return r;
5194
5195 for (int i = 0; i < 4; i++) {
5196 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5197 alu.op = ALU_OP2_MUL_64;
5198
5199 r600_bytecode_src(&alu.src[0], &ctx->src[0], k * 2 + ((i == 3) ? 0 : 1));
5200
5201 alu.src[1].sel = t1;
5202 alu.src[1].chan = (i == 3) ? 0 : 1;
5203
5204 alu.dst.sel = t1;
5205 alu.dst.chan = i;
5206 alu.dst.write = 1;
5207 if (i == 3)
5208 alu.last = 1;
5209 r = r600_bytecode_add_alu(ctx->bc, &alu);
5210 if (r)
5211 return r;
5212 }
5213
5214 for (int i = 0; i < 2; i++) {
5215 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5216 alu.op = ALU_OP1_MOV;
5217 alu.src[0].sel = t1;
5218 alu.src[0].chan = i;
5219 tgsi_dst(ctx, &inst->Dst[0], k * 2 + i, &alu.dst);
5220 alu.dst.write = 1;
5221 if (i == 1)
5222 alu.last = 1;
5223 r = r600_bytecode_add_alu(ctx->bc, &alu);
5224 if (r)
5225 return r;
5226 }
5227 return 0;
5228 }
5229
5230 /*
5231 * r600 - trunc to -PI..PI range
5232 * r700 - normalize by dividing by 2PI
5233 * see fdo bug 27901
5234 */
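/* The emitted sequence is:
 *   tmp = fract(src * 1/(2*pi) + 0.5)   -> normalized to [0, 1)
 *   r600:  tmp = tmp * 2*pi - pi        -> angle in [-pi, pi)
 *   r700+: tmp = tmp * 1.0 - 0.5        -> normalized angle in [-0.5, 0.5)
 */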
5235 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
5236 {
5237 int r;
5238 struct r600_bytecode_alu alu;
5239
5240 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5241 alu.op = ALU_OP3_MULADD;
5242 alu.is_op3 = 1;
5243
5244 alu.dst.chan = 0;
5245 alu.dst.sel = ctx->temp_reg;
5246 alu.dst.write = 1;
5247
5248 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5249
5250 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5251 alu.src[1].chan = 0;
5252 alu.src[1].value = u_bitcast_f2u(0.5f * M_1_PI);
5253 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
5254 alu.src[2].chan = 0;
5255 alu.last = 1;
5256 r = r600_bytecode_add_alu(ctx->bc, &alu);
5257 if (r)
5258 return r;
5259
5260 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5261 alu.op = ALU_OP1_FRACT;
5262
5263 alu.dst.chan = 0;
5264 alu.dst.sel = ctx->temp_reg;
5265 alu.dst.write = 1;
5266
5267 alu.src[0].sel = ctx->temp_reg;
5268 alu.src[0].chan = 0;
5269 alu.last = 1;
5270 r = r600_bytecode_add_alu(ctx->bc, &alu);
5271 if (r)
5272 return r;
5273
5274 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5275 alu.op = ALU_OP3_MULADD;
5276 alu.is_op3 = 1;
5277
5278 alu.dst.chan = 0;
5279 alu.dst.sel = ctx->temp_reg;
5280 alu.dst.write = 1;
5281
5282 alu.src[0].sel = ctx->temp_reg;
5283 alu.src[0].chan = 0;
5284
5285 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5286 alu.src[1].chan = 0;
5287 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5288 alu.src[2].chan = 0;
5289
5290 if (ctx->bc->chip_class == R600) {
5291 alu.src[1].value = u_bitcast_f2u(2.0f * M_PI);
5292 alu.src[2].value = u_bitcast_f2u(-M_PI);
5293 } else {
5294 alu.src[1].sel = V_SQ_ALU_SRC_1;
5295 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
5296 alu.src[2].neg = 1;
5297 }
5298
5299 alu.last = 1;
5300 r = r600_bytecode_add_alu(ctx->bc, &alu);
5301 if (r)
5302 return r;
5303 return 0;
5304 }
5305
5306 static int cayman_trig(struct r600_shader_ctx *ctx)
5307 {
5308 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5309 struct r600_bytecode_alu alu;
5310 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5311 int i, r;
5312
5313 r = tgsi_setup_trig(ctx);
5314 if (r)
5315 return r;
5316
5317
5318 for (i = 0; i < last_slot; i++) {
5319 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5320 alu.op = ctx->inst_info->op;
5321 alu.dst.chan = i;
5322
5323 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5324 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5325
5326 alu.src[0].sel = ctx->temp_reg;
5327 alu.src[0].chan = 0;
5328 if (i == last_slot - 1)
5329 alu.last = 1;
5330 r = r600_bytecode_add_alu(ctx->bc, &alu);
5331 if (r)
5332 return r;
5333 }
5334 return 0;
5335 }
5336
5337 static int tgsi_trig(struct r600_shader_ctx *ctx)
5338 {
5339 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5340 struct r600_bytecode_alu alu;
5341 int i, r;
5342 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5343
5344 r = tgsi_setup_trig(ctx);
5345 if (r)
5346 return r;
5347
5348 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5349 alu.op = ctx->inst_info->op;
5350 alu.dst.chan = 0;
5351 alu.dst.sel = ctx->temp_reg;
5352 alu.dst.write = 1;
5353
5354 alu.src[0].sel = ctx->temp_reg;
5355 alu.src[0].chan = 0;
5356 alu.last = 1;
5357 r = r600_bytecode_add_alu(ctx->bc, &alu);
5358 if (r)
5359 return r;
5360
5361 /* replicate result */
5362 for (i = 0; i < lasti + 1; i++) {
5363 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5364 continue;
5365
5366 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5367 alu.op = ALU_OP1_MOV;
5368
5369 alu.src[0].sel = ctx->temp_reg;
5370 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5371 if (i == lasti)
5372 alu.last = 1;
5373 r = r600_bytecode_add_alu(ctx->bc, &alu);
5374 if (r)
5375 return r;
5376 }
5377 return 0;
5378 }
5379
5380 static int tgsi_kill(struct r600_shader_ctx *ctx)
5381 {
5382 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5383 struct r600_bytecode_alu alu;
5384 int i, r;
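/* Both kill variants compare against src0 = 0 using the greater-than
 * style kill op from inst_info: unconditional KILL uses src1 = -1.0 so
 * the condition always holds, while KILL_IF compares against the source
 * and kills where a component is negative. */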
5385
5386 for (i = 0; i < 4; i++) {
5387 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5388 alu.op = ctx->inst_info->op;
5389
5390 alu.dst.chan = i;
5391
5392 alu.src[0].sel = V_SQ_ALU_SRC_0;
5393
5394 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
5395 alu.src[1].sel = V_SQ_ALU_SRC_1;
5396 alu.src[1].neg = 1;
5397 } else {
5398 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5399 }
5400 if (i == 3) {
5401 alu.last = 1;
5402 }
5403 r = r600_bytecode_add_alu(ctx->bc, &alu);
5404 if (r)
5405 return r;
5406 }
5407
5408 /* kill must be last in ALU */
5409 ctx->bc->force_add_cf = 1;
5410 ctx->shader->uses_kill = TRUE;
5411 return 0;
5412 }
5413
5414 static int tgsi_lit(struct r600_shader_ctx *ctx)
5415 {
5416 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5417 struct r600_bytecode_alu alu;
5418 int r;
5419
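/* LIT: dst = (1, max(src.x, 0), spec, 1) where
 * spec = pow(max(src.y, 0), src.w) if src.x > 0, else 0; the power is
 * computed as exp(src.w * log(max(src.y, 0))), with MUL_LIT handling
 * the src.x <= 0 special case. */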
5420 /* tmp.x = max(src.y, 0.0) */
5421 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5422 alu.op = ALU_OP2_MAX;
5423 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
5424 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
5425 alu.src[1].chan = 1;
5426
5427 alu.dst.sel = ctx->temp_reg;
5428 alu.dst.chan = 0;
5429 alu.dst.write = 1;
5430
5431 alu.last = 1;
5432 r = r600_bytecode_add_alu(ctx->bc, &alu);
5433 if (r)
5434 return r;
5435
5436 if (inst->Dst[0].Register.WriteMask & (1 << 2))
5437 {
5438 int chan;
5439 int sel;
5440 unsigned i;
5441
5442 if (ctx->bc->chip_class == CAYMAN) {
5443 for (i = 0; i < 3; i++) {
5444 /* tmp.z = log(tmp.x) */
5445 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5446 alu.op = ALU_OP1_LOG_CLAMPED;
5447 alu.src[0].sel = ctx->temp_reg;
5448 alu.src[0].chan = 0;
5449 alu.dst.sel = ctx->temp_reg;
5450 alu.dst.chan = i;
5451 if (i == 2) {
5452 alu.dst.write = 1;
5453 alu.last = 1;
5454 } else
5455 alu.dst.write = 0;
5456
5457 r = r600_bytecode_add_alu(ctx->bc, &alu);
5458 if (r)
5459 return r;
5460 }
5461 } else {
5462 /* tmp.z = log(tmp.x) */
5463 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5464 alu.op = ALU_OP1_LOG_CLAMPED;
5465 alu.src[0].sel = ctx->temp_reg;
5466 alu.src[0].chan = 0;
5467 alu.dst.sel = ctx->temp_reg;
5468 alu.dst.chan = 2;
5469 alu.dst.write = 1;
5470 alu.last = 1;
5471 r = r600_bytecode_add_alu(ctx->bc, &alu);
5472 if (r)
5473 return r;
5474 }
5475
5476 chan = alu.dst.chan;
5477 sel = alu.dst.sel;
5478
5479 /* tmp.x = MUL_LIT(tmp.z, src.w, src.x) */
5480 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5481 alu.op = ALU_OP3_MUL_LIT;
5482 alu.src[0].sel = sel;
5483 alu.src[0].chan = chan;
5484 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
5485 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
5486 alu.dst.sel = ctx->temp_reg;
5487 alu.dst.chan = 0;
5488 alu.dst.write = 1;
5489 alu.is_op3 = 1;
5490 alu.last = 1;
5491 r = r600_bytecode_add_alu(ctx->bc, &alu);
5492 if (r)
5493 return r;
5494
5495 if (ctx->bc->chip_class == CAYMAN) {
5496 for (i = 0; i < 3; i++) {
5497 /* dst.z = exp(tmp.x) */
5498 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5499 alu.op = ALU_OP1_EXP_IEEE;
5500 alu.src[0].sel = ctx->temp_reg;
5501 alu.src[0].chan = 0;
5502 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5503 if (i == 2) {
5504 alu.dst.write = 1;
5505 alu.last = 1;
5506 } else
5507 alu.dst.write = 0;
5508 r = r600_bytecode_add_alu(ctx->bc, &alu);
5509 if (r)
5510 return r;
5511 }
5512 } else {
5513 /* dst.z = exp(tmp.x) */
5514 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5515 alu.op = ALU_OP1_EXP_IEEE;
5516 alu.src[0].sel = ctx->temp_reg;
5517 alu.src[0].chan = 0;
5518 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
5519 alu.last = 1;
5520 r = r600_bytecode_add_alu(ctx->bc, &alu);
5521 if (r)
5522 return r;
5523 }
5524 }
5525
5526 /* dst.x <- 1.0 */
5527 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5528 alu.op = ALU_OP1_MOV;
5529 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
5530 alu.src[0].chan = 0;
5531 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5532 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
5533 r = r600_bytecode_add_alu(ctx->bc, &alu);
5534 if (r)
5535 return r;
5536
5537 /* dst.y = max(src.x, 0.0) */
5538 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5539 alu.op = ALU_OP2_MAX;
5540 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5541 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
5542 alu.src[1].chan = 0;
5543 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
5544 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
5545 r = r600_bytecode_add_alu(ctx->bc, &alu);
5546 if (r)
5547 return r;
5548
5549 /* dst.w = 1.0 */
5550 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5551 alu.op = ALU_OP1_MOV;
5552 alu.src[0].sel = V_SQ_ALU_SRC_1;
5553 alu.src[0].chan = 0;
5554 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
5555 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
5556 alu.last = 1;
5557 r = r600_bytecode_add_alu(ctx->bc, &alu);
5558 if (r)
5559 return r;
5560
5561 return 0;
5562 }
5563
5564 static int tgsi_rsq(struct r600_shader_ctx *ctx)
5565 {
5566 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5567 struct r600_bytecode_alu alu;
5568 int i, r;
5569
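/* The abs modifier is forced on the sources below, giving 1/sqrt(|x|),
 * presumably to match the legacy RSQ semantics for negative inputs. */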
5570 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5571
5572 alu.op = ALU_OP1_RECIPSQRT_IEEE;
5573
5574 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5575 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5576 r600_bytecode_src_set_abs(&alu.src[i]);
5577 }
5578 alu.dst.sel = ctx->temp_reg;
5579 alu.dst.write = 1;
5580 alu.last = 1;
5581 r = r600_bytecode_add_alu(ctx->bc, &alu);
5582 if (r)
5583 return r;
5584 /* replicate result */
5585 return tgsi_helper_tempx_replicate(ctx);
5586 }
5587
5588 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
5589 {
5590 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5591 struct r600_bytecode_alu alu;
5592 int i, r;
5593
5594 for (i = 0; i < 4; i++) {
5595 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5596 alu.src[0].sel = ctx->temp_reg;
5597 alu.op = ALU_OP1_MOV;
5598 alu.dst.chan = i;
5599 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5600 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5601 if (i == 3)
5602 alu.last = 1;
5603 r = r600_bytecode_add_alu(ctx->bc, &alu);
5604 if (r)
5605 return r;
5606 }
5607 return 0;
5608 }
5609
5610 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
5611 {
5612 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5613 struct r600_bytecode_alu alu;
5614 int i, r;
5615
5616 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5617 alu.op = ctx->inst_info->op;
5618 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5619 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5620 }
5621 alu.dst.sel = ctx->temp_reg;
5622 alu.dst.write = 1;
5623 alu.last = 1;
5624 r = r600_bytecode_add_alu(ctx->bc, &alu);
5625 if (r)
5626 return r;
5627 /* replicate result */
5628 return tgsi_helper_tempx_replicate(ctx);
5629 }
5630
5631 static int cayman_pow(struct r600_shader_ctx *ctx)
5632 {
5633 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5634 int i, r;
5635 struct r600_bytecode_alu alu;
5636 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5637
5638 for (i = 0; i < 3; i++) {
5639 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5640 alu.op = ALU_OP1_LOG_IEEE;
5641 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5642 alu.dst.sel = ctx->temp_reg;
5643 alu.dst.chan = i;
5644 alu.dst.write = 1;
5645 if (i == 2)
5646 alu.last = 1;
5647 r = r600_bytecode_add_alu(ctx->bc, &alu);
5648 if (r)
5649 return r;
5650 }
5651
5652 /* b * LOG2(a) */
5653 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5654 alu.op = ALU_OP2_MUL;
5655 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5656 alu.src[1].sel = ctx->temp_reg;
5657 alu.dst.sel = ctx->temp_reg;
5658 alu.dst.write = 1;
5659 alu.last = 1;
5660 r = r600_bytecode_add_alu(ctx->bc, &alu);
5661 if (r)
5662 return r;
5663
5664 for (i = 0; i < last_slot; i++) {
5665 /* POW(a,b) = EXP2(b * LOG2(a))*/
5666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5667 alu.op = ALU_OP1_EXP_IEEE;
5668 alu.src[0].sel = ctx->temp_reg;
5669
5670 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5671 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5672 if (i == last_slot - 1)
5673 alu.last = 1;
5674 r = r600_bytecode_add_alu(ctx->bc, &alu);
5675 if (r)
5676 return r;
5677 }
5678 return 0;
5679 }
5680
5681 static int tgsi_pow(struct r600_shader_ctx *ctx)
5682 {
5683 struct r600_bytecode_alu alu;
5684 int r;
5685
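/* Scalar path of the usual lowering pow(a, b) = exp2(b * log2(a));
 * as with LOG_IEEE generally, a <= 0 yields undefined results. */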
5686 /* LOG2(a) */
5687 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5688 alu.op = ALU_OP1_LOG_IEEE;
5689 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5690 alu.dst.sel = ctx->temp_reg;
5691 alu.dst.write = 1;
5692 alu.last = 1;
5693 r = r600_bytecode_add_alu(ctx->bc, &alu);
5694 if (r)
5695 return r;
5696 /* b * LOG2(a) */
5697 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5698 alu.op = ALU_OP2_MUL;
5699 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5700 alu.src[1].sel = ctx->temp_reg;
5701 alu.dst.sel = ctx->temp_reg;
5702 alu.dst.write = 1;
5703 alu.last = 1;
5704 r = r600_bytecode_add_alu(ctx->bc, &alu);
5705 if (r)
5706 return r;
5707 /* POW(a,b) = EXP2(b * LOG2(a))*/
5708 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5709 alu.op = ALU_OP1_EXP_IEEE;
5710 alu.src[0].sel = ctx->temp_reg;
5711 alu.dst.sel = ctx->temp_reg;
5712 alu.dst.write = 1;
5713 alu.last = 1;
5714 r = r600_bytecode_add_alu(ctx->bc, &alu);
5715 if (r)
5716 return r;
5717 return tgsi_helper_tempx_replicate(ctx);
5718 }
5719
5720 static int emit_mul_int_op(struct r600_bytecode *bc,
5721 struct r600_bytecode_alu *alu_src)
5722 {
5723 struct r600_bytecode_alu alu;
5724 int i, r;
5725 alu = *alu_src;
5726 if (bc->chip_class == CAYMAN) {
5727 for (i = 0; i < 4; i++) {
5728 alu.dst.chan = i;
5729 alu.dst.write = (i == alu_src->dst.chan);
5730 alu.last = (i == 3);
5731
5732 r = r600_bytecode_add_alu(bc, &alu);
5733 if (r)
5734 return r;
5735 }
5736 } else {
5737 alu.last = 1;
5738 r = r600_bytecode_add_alu(bc, &alu);
5739 if (r)
5740 return r;
5741 }
5742 return 0;
5743 }
5744
5745 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
5746 {
5747 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5748 struct r600_bytecode_alu alu;
5749 int i, r, j;
5750 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5751 int lasti = tgsi_last_instruction(write_mask);
5752 int tmp0 = ctx->temp_reg;
5753 int tmp1 = r600_get_temp(ctx);
5754 int tmp2 = r600_get_temp(ctx);
5755 int tmp3 = r600_get_temp(ctx);
5756 int tmp4 = 0;
5757
5758 /* Use additional temp if dst register and src register are the same */
5759 if (inst->Src[0].Register.Index == inst->Dst[0].Register.Index ||
5760 inst->Src[1].Register.Index == inst->Dst[0].Register.Index) {
5761 tmp4 = r600_get_temp(ctx);
5762 }
5763
5764 /* Unsigned path:
5765 *
5766 * we need to represent src1 as src2*q + r, where q is the quotient and r is the remainder
5767 *
5768 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
5769 * 2. tmp0.z = lo (tmp0.x * src2)
5770 * 3. tmp0.w = -tmp0.z
5771 * 4. tmp0.y = hi (tmp0.x * src2)
5772 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
5773 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
5774 * 7. tmp1.x = tmp0.x - tmp0.w
5775 * 8. tmp1.y = tmp0.x + tmp0.w
5776 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
5777 * 10. tmp0.z = hi(tmp0.x * src1) = q
5778 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
5779 *
5780 * 12. tmp0.w = src1 - tmp0.y = r
5781 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
5782 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
5783 *
5784 * if DIV
5785 *
5786 * 15. tmp1.z = tmp0.z + 1 = q + 1
5787 * 16. tmp1.w = tmp0.z - 1 = q - 1
5788 *
5789 * else MOD
5790 *
5791 * 15. tmp1.z = tmp0.w - src2 = r - src2
5792 * 16. tmp1.w = tmp0.w + src2 = r + src2
5793 *
5794 * endif
5795 *
5796 * 17. tmp1.x = tmp1.x & tmp1.y
5797 *
5798 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
5799 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
5800 *
5801 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
5802 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
5803 *
5804 * Signed path:
5805 *
5806 * Same as unsigned, using abs values of the operands,
5807 * and fixing the sign of the result in the end.
5808 */
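/* Worked example (unsigned DIV, src1 = 7, src2 = 3): the estimate q
 * from step 10 can be off by one in either direction, so step 12
 * computes r = src1 - q*src2 and steps 13/14 test r >= src2 and
 * r >= 0; steps 15-19 then select q, q+1 (remainder still too big)
 * or q-1 (the subtraction underflowed). Here q = 2, r = 1 and both
 * tests leave q unchanged. */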
5809
5810 for (i = 0; i < 4; i++) {
5811 if (!(write_mask & (1<<i)))
5812 continue;
5813
5814 if (signed_op) {
5815
5816 /* tmp2.x = -src0 */
5817 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5818 alu.op = ALU_OP2_SUB_INT;
5819
5820 alu.dst.sel = tmp2;
5821 alu.dst.chan = 0;
5822 alu.dst.write = 1;
5823
5824 alu.src[0].sel = V_SQ_ALU_SRC_0;
5825
5826 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5827
5828 alu.last = 1;
5829 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5830 return r;
5831
5832 /* tmp2.y = -src1 */
5833 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5834 alu.op = ALU_OP2_SUB_INT;
5835
5836 alu.dst.sel = tmp2;
5837 alu.dst.chan = 1;
5838 alu.dst.write = 1;
5839
5840 alu.src[0].sel = V_SQ_ALU_SRC_0;
5841
5842 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5843
5844 alu.last = 1;
5845 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5846 return r;
5847
5848 /* tmp2.z sign bit is set if src0 and src1 signs are different */
5849 /* it will be the sign of the quotient */
5850 if (!mod) {
5851
5852 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5853 alu.op = ALU_OP2_XOR_INT;
5854
5855 alu.dst.sel = tmp2;
5856 alu.dst.chan = 2;
5857 alu.dst.write = 1;
5858
5859 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5860 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5861
5862 alu.last = 1;
5863 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5864 return r;
5865 }
5866
5867 /* tmp2.x = |src0| */
5868 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5869 alu.op = ALU_OP3_CNDGE_INT;
5870 alu.is_op3 = 1;
5871
5872 alu.dst.sel = tmp2;
5873 alu.dst.chan = 0;
5874 alu.dst.write = 1;
5875
5876 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5877 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5878 alu.src[2].sel = tmp2;
5879 alu.src[2].chan = 0;
5880
5881 alu.last = 1;
5882 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5883 return r;
5884
5885 /* tmp2.y = |src1| */
5886 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5887 alu.op = ALU_OP3_CNDGE_INT;
5888 alu.is_op3 = 1;
5889
5890 alu.dst.sel = tmp2;
5891 alu.dst.chan = 1;
5892 alu.dst.write = 1;
5893
5894 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5895 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5896 alu.src[2].sel = tmp2;
5897 alu.src[2].chan = 1;
5898
5899 alu.last = 1;
5900 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5901 return r;
5902
5903 }
5904
5905 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
5906 if (ctx->bc->chip_class == CAYMAN) {
5907 /* tmp3.x = u2f(src2) */
5908 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5909 alu.op = ALU_OP1_UINT_TO_FLT;
5910
5911 alu.dst.sel = tmp3;
5912 alu.dst.chan = 0;
5913 alu.dst.write = 1;
5914
5915 if (signed_op) {
5916 alu.src[0].sel = tmp2;
5917 alu.src[0].chan = 1;
5918 } else {
5919 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5920 }
5921
5922 alu.last = 1;
5923 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5924 return r;
5925
5926 /* tmp0.x = recip(tmp3.x) */
5927 for (j = 0 ; j < 3; j++) {
5928 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5929 alu.op = ALU_OP1_RECIP_IEEE;
5930
5931 alu.dst.sel = tmp0;
5932 alu.dst.chan = j;
5933 alu.dst.write = (j == 0);
5934
5935 alu.src[0].sel = tmp3;
5936 alu.src[0].chan = 0;
5937
5938 if (j == 2)
5939 alu.last = 1;
5940 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5941 return r;
5942 }
5943
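/* Scale the reciprocal by 2^32 (0x4f800000 == 4294967296.0f) and
 * convert back to an integer, approximating RECIP_UINT (which Cayman
 * does not provide) as round(2^32 / src2). */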
5944 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5945 alu.op = ALU_OP2_MUL;
5946
5947 alu.src[0].sel = tmp0;
5948 alu.src[0].chan = 0;
5949
5950 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5951 alu.src[1].value = 0x4f800000;
5952
5953 alu.dst.sel = tmp3;
5954 alu.dst.write = 1;
5955 alu.last = 1;
5956 r = r600_bytecode_add_alu(ctx->bc, &alu);
5957 if (r)
5958 return r;
5959
5960 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5961 alu.op = ALU_OP1_FLT_TO_UINT;
5962
5963 alu.dst.sel = tmp0;
5964 alu.dst.chan = 0;
5965 alu.dst.write = 1;
5966
5967 alu.src[0].sel = tmp3;
5968 alu.src[0].chan = 0;
5969
5970 alu.last = 1;
5971 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5972 return r;
5973
5974 } else {
5975 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5976 alu.op = ALU_OP1_RECIP_UINT;
5977
5978 alu.dst.sel = tmp0;
5979 alu.dst.chan = 0;
5980 alu.dst.write = 1;
5981
5982 if (signed_op) {
5983 alu.src[0].sel = tmp2;
5984 alu.src[0].chan = 1;
5985 } else {
5986 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5987 }
5988
5989 alu.last = 1;
5990 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5991 return r;
5992 }
5993
5994 /* 2. tmp0.z = lo (tmp0.x * src2) */
5995 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5996 alu.op = ALU_OP2_MULLO_UINT;
5997
5998 alu.dst.sel = tmp0;
5999 alu.dst.chan = 2;
6000 alu.dst.write = 1;
6001
6002 alu.src[0].sel = tmp0;
6003 alu.src[0].chan = 0;
6004 if (signed_op) {
6005 alu.src[1].sel = tmp2;
6006 alu.src[1].chan = 1;
6007 } else {
6008 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6009 }
6010
6011 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6012 return r;
6013
6014 /* 3. tmp0.w = -tmp0.z */
6015 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6016 alu.op = ALU_OP2_SUB_INT;
6017
6018 alu.dst.sel = tmp0;
6019 alu.dst.chan = 3;
6020 alu.dst.write = 1;
6021
6022 alu.src[0].sel = V_SQ_ALU_SRC_0;
6023 alu.src[1].sel = tmp0;
6024 alu.src[1].chan = 2;
6025
6026 alu.last = 1;
6027 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6028 return r;
6029
6030 /* 4. tmp0.y = hi (tmp0.x * src2) */
6031 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6032 alu.op = ALU_OP2_MULHI_UINT;
6033
6034 alu.dst.sel = tmp0;
6035 alu.dst.chan = 1;
6036 alu.dst.write = 1;
6037
6038 alu.src[0].sel = tmp0;
6039 alu.src[0].chan = 0;
6040
6041 if (signed_op) {
6042 alu.src[1].sel = tmp2;
6043 alu.src[1].chan = 1;
6044 } else {
6045 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6046 }
6047
6048 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6049 return r;
6050
6051 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
6052 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6053 alu.op = ALU_OP3_CNDE_INT;
6054 alu.is_op3 = 1;
6055
6056 alu.dst.sel = tmp0;
6057 alu.dst.chan = 2;
6058 alu.dst.write = 1;
6059
6060 alu.src[0].sel = tmp0;
6061 alu.src[0].chan = 1;
6062 alu.src[1].sel = tmp0;
6063 alu.src[1].chan = 3;
6064 alu.src[2].sel = tmp0;
6065 alu.src[2].chan = 2;
6066
6067 alu.last = 1;
6068 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6069 return r;
6070
6071 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
6072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6073 alu.op = ALU_OP2_MULHI_UINT;
6074
6075 alu.dst.sel = tmp0;
6076 alu.dst.chan = 3;
6077 alu.dst.write = 1;
6078
6079 alu.src[0].sel = tmp0;
6080 alu.src[0].chan = 2;
6081
6082 alu.src[1].sel = tmp0;
6083 alu.src[1].chan = 0;
6084
6085 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6086 return r;
6087
6088 /* 7. tmp1.x = tmp0.x - tmp0.w */
6089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6090 alu.op = ALU_OP2_SUB_INT;
6091
6092 alu.dst.sel = tmp1;
6093 alu.dst.chan = 0;
6094 alu.dst.write = 1;
6095
6096 alu.src[0].sel = tmp0;
6097 alu.src[0].chan = 0;
6098 alu.src[1].sel = tmp0;
6099 alu.src[1].chan = 3;
6100
6101 alu.last = 1;
6102 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6103 return r;
6104
6105 /* 8. tmp1.y = tmp0.x + tmp0.w */
6106 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6107 alu.op = ALU_OP2_ADD_INT;
6108
6109 alu.dst.sel = tmp1;
6110 alu.dst.chan = 1;
6111 alu.dst.write = 1;
6112
6113 alu.src[0].sel = tmp0;
6114 alu.src[0].chan = 0;
6115 alu.src[1].sel = tmp0;
6116 alu.src[1].chan = 3;
6117
6118 alu.last = 1;
6119 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6120 return r;
6121
6122 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
6123 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6124 alu.op = ALU_OP3_CNDE_INT;
6125 alu.is_op3 = 1;
6126
6127 alu.dst.sel = tmp0;
6128 alu.dst.chan = 0;
6129 alu.dst.write = 1;
6130
6131 alu.src[0].sel = tmp0;
6132 alu.src[0].chan = 1;
6133 alu.src[1].sel = tmp1;
6134 alu.src[1].chan = 1;
6135 alu.src[2].sel = tmp1;
6136 alu.src[2].chan = 0;
6137
6138 alu.last = 1;
6139 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6140 return r;
6141
6142 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
6143 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6144 alu.op = ALU_OP2_MULHI_UINT;
6145
6146 alu.dst.sel = tmp0;
6147 alu.dst.chan = 2;
6148 alu.dst.write = 1;
6149
6150 alu.src[0].sel = tmp0;
6151 alu.src[0].chan = 0;
6152
6153 if (signed_op) {
6154 alu.src[1].sel = tmp2;
6155 alu.src[1].chan = 0;
6156 } else {
6157 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6158 }
6159
6160 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6161 return r;
6162
6163 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
6164 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6165 alu.op = ALU_OP2_MULLO_UINT;
6166
6167 alu.dst.sel = tmp0;
6168 alu.dst.chan = 1;
6169 alu.dst.write = 1;
6170
6171 if (signed_op) {
6172 alu.src[0].sel = tmp2;
6173 alu.src[0].chan = 1;
6174 } else {
6175 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6176 }
6177
6178 alu.src[1].sel = tmp0;
6179 alu.src[1].chan = 2;
6180
6181 if ((r = emit_mul_int_op(ctx->bc, &alu)))
6182 return r;
6183
6184 /* 12. tmp0.w = src1 - tmp0.y = r */
6185 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6186 alu.op = ALU_OP2_SUB_INT;
6187
6188 alu.dst.sel = tmp0;
6189 alu.dst.chan = 3;
6190 alu.dst.write = 1;
6191
6192 if (signed_op) {
6193 alu.src[0].sel = tmp2;
6194 alu.src[0].chan = 0;
6195 } else {
6196 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6197 }
6198
6199 alu.src[1].sel = tmp0;
6200 alu.src[1].chan = 1;
6201
6202 alu.last = 1;
6203 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6204 return r;
6205
6206 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
6207 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6208 alu.op = ALU_OP2_SETGE_UINT;
6209
6210 alu.dst.sel = tmp1;
6211 alu.dst.chan = 0;
6212 alu.dst.write = 1;
6213
6214 alu.src[0].sel = tmp0;
6215 alu.src[0].chan = 3;
6216 if (signed_op) {
6217 alu.src[1].sel = tmp2;
6218 alu.src[1].chan = 1;
6219 } else {
6220 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6221 }
6222
6223 alu.last = 1;
6224 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6225 return r;
6226
6227 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
6228 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6229 alu.op = ALU_OP2_SETGE_UINT;
6230
6231 alu.dst.sel = tmp1;
6232 alu.dst.chan = 1;
6233 alu.dst.write = 1;
6234
6235 if (signed_op) {
6236 alu.src[0].sel = tmp2;
6237 alu.src[0].chan = 0;
6238 } else {
6239 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6240 }
6241
6242 alu.src[1].sel = tmp0;
6243 alu.src[1].chan = 1;
6244
6245 alu.last = 1;
6246 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6247 return r;
6248
6249 if (mod) { /* UMOD */
6250
6251 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
6252 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6253 alu.op = ALU_OP2_SUB_INT;
6254
6255 alu.dst.sel = tmp1;
6256 alu.dst.chan = 2;
6257 alu.dst.write = 1;
6258
6259 alu.src[0].sel = tmp0;
6260 alu.src[0].chan = 3;
6261
6262 if (signed_op) {
6263 alu.src[1].sel = tmp2;
6264 alu.src[1].chan = 1;
6265 } else {
6266 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6267 }
6268
6269 alu.last = 1;
6270 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6271 return r;
6272
6273 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
6274 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6275 alu.op = ALU_OP2_ADD_INT;
6276
6277 alu.dst.sel = tmp1;
6278 alu.dst.chan = 3;
6279 alu.dst.write = 1;
6280
6281 alu.src[0].sel = tmp0;
6282 alu.src[0].chan = 3;
6283 if (signed_op) {
6284 alu.src[1].sel = tmp2;
6285 alu.src[1].chan = 1;
6286 } else {
6287 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
6288 }
6289
6290 alu.last = 1;
6291 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6292 return r;
6293
6294 } else { /* UDIV */
6295
6296 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
6297 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6298 alu.op = ALU_OP2_ADD_INT;
6299
6300 alu.dst.sel = tmp1;
6301 alu.dst.chan = 2;
6302 alu.dst.write = 1;
6303
6304 alu.src[0].sel = tmp0;
6305 alu.src[0].chan = 2;
6306 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6307
6308 alu.last = 1;
6309 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6310 return r;
6311
6312 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
6313 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6314 alu.op = ALU_OP2_ADD_INT;
6315
6316 alu.dst.sel = tmp1;
6317 alu.dst.chan = 3;
6318 alu.dst.write = 1;
6319
6320 alu.src[0].sel = tmp0;
6321 alu.src[0].chan = 2;
6322 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
6323
6324 alu.last = 1;
6325 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6326 return r;
6327
6328 }
6329
6330 /* 17. tmp1.x = tmp1.x & tmp1.y */
6331 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6332 alu.op = ALU_OP2_AND_INT;
6333
6334 alu.dst.sel = tmp1;
6335 alu.dst.chan = 0;
6336 alu.dst.write = 1;
6337
6338 alu.src[0].sel = tmp1;
6339 alu.src[0].chan = 0;
6340 alu.src[1].sel = tmp1;
6341 alu.src[1].chan = 1;
6342
6343 alu.last = 1;
6344 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6345 return r;
6346
6347 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
6348 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
6349 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6350 alu.op = ALU_OP3_CNDE_INT;
6351 alu.is_op3 = 1;
6352
6353 alu.dst.sel = tmp0;
6354 alu.dst.chan = 2;
6355 alu.dst.write = 1;
6356
6357 alu.src[0].sel = tmp1;
6358 alu.src[0].chan = 0;
6359 alu.src[1].sel = tmp0;
6360 alu.src[1].chan = mod ? 3 : 2;
6361 alu.src[2].sel = tmp1;
6362 alu.src[2].chan = 2;
6363
6364 alu.last = 1;
6365 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6366 return r;
6367
6368 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
6369 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6370 alu.op = ALU_OP3_CNDE_INT;
6371 alu.is_op3 = 1;
6372
6373 if (signed_op) {
6374 alu.dst.sel = tmp0;
6375 alu.dst.chan = 2;
6376 alu.dst.write = 1;
6377 } else {
6378 if (tmp4 > 0) {
6379 alu.dst.sel = tmp4;
6380 alu.dst.chan = i;
6381 alu.dst.write = 1;
6382 } else {
6383 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6384 }
6385 }
6386
6387 alu.src[0].sel = tmp1;
6388 alu.src[0].chan = 1;
6389 alu.src[1].sel = tmp1;
6390 alu.src[1].chan = 3;
6391 alu.src[2].sel = tmp0;
6392 alu.src[2].chan = 2;
6393
6394 alu.last = 1;
6395 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6396 return r;
6397
6398 if (signed_op) {
6399
6400 /* fix the sign of the result */
6401
6402 if (mod) {
6403
6404 /* tmp0.x = -tmp0.z */
6405 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6406 alu.op = ALU_OP2_SUB_INT;
6407
6408 alu.dst.sel = tmp0;
6409 alu.dst.chan = 0;
6410 alu.dst.write = 1;
6411
6412 alu.src[0].sel = V_SQ_ALU_SRC_0;
6413 alu.src[1].sel = tmp0;
6414 alu.src[1].chan = 2;
6415
6416 alu.last = 1;
6417 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6418 return r;
6419
6420 /* sign of the remainder is the same as the sign of src0 */
6421 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
6422 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6423 alu.op = ALU_OP3_CNDGE_INT;
6424 alu.is_op3 = 1;
6425
6426 if (tmp4 > 0) {
6427 alu.dst.sel = tmp4;
6428 alu.dst.chan = i;
6429 alu.dst.write = 1;
6430 } else {
6431 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6432 }
6433
6434 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6435 alu.src[1].sel = tmp0;
6436 alu.src[1].chan = 2;
6437 alu.src[2].sel = tmp0;
6438 alu.src[2].chan = 0;
6439
6440 alu.last = 1;
6441 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6442 return r;
6443
6444 } else {
6445
6446 /* tmp0.x = -tmp0.z */
6447 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6448 alu.op = ALU_OP2_SUB_INT;
6449
6450 alu.dst.sel = tmp0;
6451 alu.dst.chan = 0;
6452 alu.dst.write = 1;
6453
6454 alu.src[0].sel = V_SQ_ALU_SRC_0;
6455 alu.src[1].sel = tmp0;
6456 alu.src[1].chan = 2;
6457
6458 alu.last = 1;
6459 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6460 return r;
6461
6462 /* fix the quotient sign (same as the sign of src0*src1) */
6463 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
6464 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6465 alu.op = ALU_OP3_CNDGE_INT;
6466 alu.is_op3 = 1;
6467
6468 if (tmp4 > 0) {
6469 alu.dst.sel = tmp4;
6470 alu.dst.chan = i;
6471 alu.dst.write = 1;
6472 } else {
6473 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6474 }
6475
6476 alu.src[0].sel = tmp2;
6477 alu.src[0].chan = 2;
6478 alu.src[1].sel = tmp0;
6479 alu.src[1].chan = 2;
6480 alu.src[2].sel = tmp0;
6481 alu.src[2].chan = 0;
6482
6483 alu.last = 1;
6484 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6485 return r;
6486 }
6487 }
6488 }
6489
6490 if (tmp4 > 0) {
6491 for (i = 0; i <= lasti; ++i) {
6492 if (!(write_mask & (1<<i)))
6493 continue;
6494
6495 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6496 alu.op = ALU_OP1_MOV;
6497 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6498 alu.src[0].sel = tmp4;
6499 alu.src[0].chan = i;
6500
6501 if (i == lasti)
6502 alu.last = 1;
6503 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
6504 return r;
6505 }
6506 }
6507
6508 return 0;
6509 }
6510
6511 static int tgsi_udiv(struct r600_shader_ctx *ctx)
6512 {
6513 return tgsi_divmod(ctx, 0, 0);
6514 }
6515
6516 static int tgsi_umod(struct r600_shader_ctx *ctx)
6517 {
6518 return tgsi_divmod(ctx, 1, 0);
6519 }
6520
6521 static int tgsi_idiv(struct r600_shader_ctx *ctx)
6522 {
6523 return tgsi_divmod(ctx, 0, 1);
6524 }
6525
6526 static int tgsi_imod(struct r600_shader_ctx *ctx)
6527 {
6528 return tgsi_divmod(ctx, 1, 1);
6529 }
6530
6531
6532 static int tgsi_f2i(struct r600_shader_ctx *ctx)
6533 {
6534 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6535 struct r600_bytecode_alu alu;
6536 int i, r;
6537 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6538 int last_inst = tgsi_last_instruction(write_mask);
6539
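/* Two-step conversion: TRUNC rounds toward zero into a temp, then the
 * FLT_TO_INT/FLT_TO_UINT op from inst_info converts, e.g.
 * f2i(-1.7) -> TRUNC -> -1.0 -> -1. The forced alu.last for
 * FLT_TO_UINT in the second loop keeps each such conversion in its
 * own instruction group. */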
6540 for (i = 0; i < 4; i++) {
6541 if (!(write_mask & (1<<i)))
6542 continue;
6543
6544 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6545 alu.op = ALU_OP1_TRUNC;
6546
6547 alu.dst.sel = ctx->temp_reg;
6548 alu.dst.chan = i;
6549 alu.dst.write = 1;
6550
6551 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6552 if (i == last_inst)
6553 alu.last = 1;
6554 r = r600_bytecode_add_alu(ctx->bc, &alu);
6555 if (r)
6556 return r;
6557 }
6558
6559 for (i = 0; i < 4; i++) {
6560 if (!(write_mask & (1<<i)))
6561 continue;
6562
6563 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6564 alu.op = ctx->inst_info->op;
6565
6566 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6567
6568 alu.src[0].sel = ctx->temp_reg;
6569 alu.src[0].chan = i;
6570
6571 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
6572 alu.last = 1;
6573 r = r600_bytecode_add_alu(ctx->bc, &alu);
6574 if (r)
6575 return r;
6576 }
6577
6578 return 0;
6579 }
6580
6581 static int tgsi_iabs(struct r600_shader_ctx *ctx)
6582 {
6583 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6584 struct r600_bytecode_alu alu;
6585 int i, r;
6586 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6587 int last_inst = tgsi_last_instruction(write_mask);
6588
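/* There is no integer absolute-value source modifier, so build
 * iabs(x) = x >= 0 ? x : -x by negating into a temp and selecting
 * with CNDGE_INT. */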
6589 /* tmp = -src */
6590 for (i = 0; i < 4; i++) {
6591 if (!(write_mask & (1<<i)))
6592 continue;
6593
6594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6595 alu.op = ALU_OP2_SUB_INT;
6596
6597 alu.dst.sel = ctx->temp_reg;
6598 alu.dst.chan = i;
6599 alu.dst.write = 1;
6600
6601 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6602 alu.src[0].sel = V_SQ_ALU_SRC_0;
6603
6604 if (i == last_inst)
6605 alu.last = 1;
6606 r = r600_bytecode_add_alu(ctx->bc, &alu);
6607 if (r)
6608 return r;
6609 }
6610
6611 /* dst = (src >= 0 ? src : tmp) */
6612 for (i = 0; i < 4; i++) {
6613 if (!(write_mask & (1<<i)))
6614 continue;
6615
6616 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6617 alu.op = ALU_OP3_CNDGE_INT;
6618 alu.is_op3 = 1;
6619 alu.dst.write = 1;
6620
6621 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6622
6623 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6624 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6625 alu.src[2].sel = ctx->temp_reg;
6626 alu.src[2].chan = i;
6627
6628 if (i == last_inst)
6629 alu.last = 1;
6630 r = r600_bytecode_add_alu(ctx->bc, &alu);
6631 if (r)
6632 return r;
6633 }
6634 return 0;
6635 }
6636
6637 static int tgsi_issg(struct r600_shader_ctx *ctx)
6638 {
6639 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6640 struct r600_bytecode_alu alu;
6641 int i, r;
6642 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6643 int last_inst = tgsi_last_instruction(write_mask);
6644
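/* Two selects clamp the result to {-1, 0, 1}: e.g. src = 5 gives
 * tmp = 5, dst = 1; src = 0 stays 0; src = -7 gives tmp = -1, which
 * the second select passes through. */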
6645 /* tmp = (src >= 0 ? src : -1) */
6646 for (i = 0; i < 4; i++) {
6647 if (!(write_mask & (1<<i)))
6648 continue;
6649
6650 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6651 alu.op = ALU_OP3_CNDGE_INT;
6652 alu.is_op3 = 1;
6653
6654 alu.dst.sel = ctx->temp_reg;
6655 alu.dst.chan = i;
6656 alu.dst.write = 1;
6657
6658 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6659 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6660 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
6661
6662 if (i == last_inst)
6663 alu.last = 1;
6664 r = r600_bytecode_add_alu(ctx->bc, &alu);
6665 if (r)
6666 return r;
6667 }
6668
6669 /* dst = (tmp > 0 ? 1 : tmp) */
6670 for (i = 0; i < 4; i++) {
6671 if (!(write_mask & (1<<i)))
6672 continue;
6673
6674 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6675 alu.op = ALU_OP3_CNDGT_INT;
6676 alu.is_op3 = 1;
6677 alu.dst.write = 1;
6678
6679 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6680
6681 alu.src[0].sel = ctx->temp_reg;
6682 alu.src[0].chan = i;
6683
6684 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6685
6686 alu.src[2].sel = ctx->temp_reg;
6687 alu.src[2].chan = i;
6688
6689 if (i == last_inst)
6690 alu.last = 1;
6691 r = r600_bytecode_add_alu(ctx->bc, &alu);
6692 if (r)
6693 return r;
6694 }
6695 return 0;
6696 }
6697
6698
6699
6700 static int tgsi_ssg(struct r600_shader_ctx *ctx)
6701 {
6702 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6703 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6704 int last_inst = tgsi_last_instruction(write_mask);
6705 struct r600_bytecode_alu alu;
6706 int i, r;
6707
6708 /* tmp = (src > 0 ? 1 : src) */
6709 for (i = 0; i <= last_inst; i++) {
6710 if (!(write_mask & (1 << i)))
6711 continue;
6712 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6713 alu.op = ALU_OP3_CNDGT;
6714 alu.is_op3 = 1;
6715
6716 alu.dst.sel = ctx->temp_reg;
6717 alu.dst.chan = i;
6718
6719 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6720 alu.src[1].sel = V_SQ_ALU_SRC_1;
6721 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6722
6723 if (i == last_inst)
6724 alu.last = 1;
6725 r = r600_bytecode_add_alu(ctx->bc, &alu);
6726 if (r)
6727 return r;
6728 }
6729
6730 /* dst = (-tmp > 0 ? -1 : tmp) */
6731 for (i = 0; i <= last_inst; i++) {
6732 if (!(write_mask & (1 << i)))
6733 continue;
6734 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6735 alu.op = ALU_OP3_CNDGT;
6736 alu.is_op3 = 1;
6737 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6738
6739 alu.src[0].sel = ctx->temp_reg;
6740 alu.src[0].chan = i;
6741 alu.src[0].neg = 1;
6742
6743 alu.src[1].sel = V_SQ_ALU_SRC_1;
6744 alu.src[1].neg = 1;
6745
6746 alu.src[2].sel = ctx->temp_reg;
6747 alu.src[2].chan = i;
6748
6749 if (i == last_inst)
6750 alu.last = 1;
6751 r = r600_bytecode_add_alu(ctx->bc, &alu);
6752 if (r)
6753 return r;
6754 }
6755 return 0;
6756 }
6757
6758 static int tgsi_bfi(struct r600_shader_ctx *ctx)
6759 {
6760 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6761 struct r600_bytecode_alu alu;
6762 int i, r, t1, t2;
6763
6764 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6765 int last_inst = tgsi_last_instruction(write_mask);
6766
6767 t1 = r600_get_temp(ctx);
6768
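/* TGSI BFI(base, insert, offset, bits) is lowered roughly as:
 *   flag = bits >= 32                    (first loop, into temp_reg)
 *   mask = BFM(bits, offset)             = ((1 << bits) - 1) << offset
 *   t2   = insert << offset
 *   dst  = (mask & t2) | (~mask & base)  (BFI_INT)
 *   dst  = flag ? insert : dst           (bits >= 32 returns insert whole)
 */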
6769 for (i = 0; i < 4; i++) {
6770 if (!(write_mask & (1<<i)))
6771 continue;
6772
6773 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6774 alu.op = ALU_OP2_SETGE_INT;
6775 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6776 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6777 alu.src[1].value = 32;
6778 alu.dst.sel = ctx->temp_reg;
6779 alu.dst.chan = i;
6780 alu.dst.write = 1;
6781 alu.last = i == last_inst;
6782 r = r600_bytecode_add_alu(ctx->bc, &alu);
6783 if (r)
6784 return r;
6785 }
6786
6787 for (i = 0; i < 4; i++) {
6788 if (!(write_mask & (1<<i)))
6789 continue;
6790
6791 /* create mask tmp */
6792 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6793 alu.op = ALU_OP2_BFM_INT;
6794 alu.dst.sel = t1;
6795 alu.dst.chan = i;
6796 alu.dst.write = 1;
6797 alu.last = i == last_inst;
6798
6799 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6800 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6801
6802 r = r600_bytecode_add_alu(ctx->bc, &alu);
6803 if (r)
6804 return r;
6805 }
6806
6807 t2 = r600_get_temp(ctx);
6808
6809 for (i = 0; i < 4; i++) {
6810 if (!(write_mask & (1<<i)))
6811 continue;
6812
6813 /* shift insert left */
6814 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6815 alu.op = ALU_OP2_LSHL_INT;
6816 alu.dst.sel = t2;
6817 alu.dst.chan = i;
6818 alu.dst.write = 1;
6819 alu.last = i == last_inst;
6820
6821 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6822 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6823
6824 r = r600_bytecode_add_alu(ctx->bc, &alu);
6825 if (r)
6826 return r;
6827 }
6828
6829 for (i = 0; i < 4; i++) {
6830 if (!(write_mask & (1<<i)))
6831 continue;
6832
6833 /* actual bitfield insert */
6834 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6835 alu.op = ALU_OP3_BFI_INT;
6836 alu.is_op3 = 1;
6837 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6838 alu.dst.chan = i;
6839 alu.dst.write = 1;
6840 alu.last = i == last_inst;
6841
6842 alu.src[0].sel = t1;
6843 alu.src[0].chan = i;
6844 alu.src[1].sel = t2;
6845 alu.src[1].chan = i;
6846 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6847
6848 r = r600_bytecode_add_alu(ctx->bc, &alu);
6849 if (r)
6850 return r;
6851 }
6852
6853 for (i = 0; i < 4; i++) {
6854 if (!(write_mask & (1<<i)))
6855 continue;
6856 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6857 alu.op = ALU_OP3_CNDE_INT;
6858 alu.is_op3 = 1;
6859 alu.src[0].sel = ctx->temp_reg;
6860 alu.src[0].chan = i;
6861 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6862
6863 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6864
6865 alu.src[1].sel = alu.dst.sel;
6866 alu.src[1].chan = i;
6867
6868 alu.last = i == last_inst;
6869 r = r600_bytecode_add_alu(ctx->bc, &alu);
6870 if (r)
6871 return r;
6872 }
6873 return 0;
6874 }
6875
6876 static int tgsi_msb(struct r600_shader_ctx *ctx)
6877 {
6878 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6879 struct r600_bytecode_alu alu;
6880 int i, r, t1, t2;
6881
6882 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6883 int last_inst = tgsi_last_instruction(write_mask);
6884
6885 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
6886 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
6887
6888 t1 = ctx->temp_reg;
6889
6890 /* the bit position is indexed from the LSB by TGSI, and from the MSB by the hardware */
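/* Example: UMSB(0x10) - FFBH_UINT returns 27 (counted from bit 31),
 * and 31 - 27 = 4 is the TGSI result. When no significant bit exists
 * FFBH returns ~0, which the final CNDGE_INT passes through. */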
6891 for (i = 0; i < 4; i++) {
6892 if (!(write_mask & (1<<i)))
6893 continue;
6894
6895 /* t1 = FFBH_INT / FFBH_UINT */
6896 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6897 alu.op = ctx->inst_info->op;
6898 alu.dst.sel = t1;
6899 alu.dst.chan = i;
6900 alu.dst.write = 1;
6901 alu.last = i == last_inst;
6902
6903 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6904
6905 r = r600_bytecode_add_alu(ctx->bc, &alu);
6906 if (r)
6907 return r;
6908 }
6909
6910 t2 = r600_get_temp(ctx);
6911
6912 for (i = 0; i < 4; i++) {
6913 if (!(write_mask & (1<<i)))
6914 continue;
6915
6916 /* t2 = 31 - t1 */
6917 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6918 alu.op = ALU_OP2_SUB_INT;
6919 alu.dst.sel = t2;
6920 alu.dst.chan = i;
6921 alu.dst.write = 1;
6922 alu.last = i == last_inst;
6923
6924 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
6925 alu.src[0].value = 31;
6926 alu.src[1].sel = t1;
6927 alu.src[1].chan = i;
6928
6929 r = r600_bytecode_add_alu(ctx->bc, &alu);
6930 if (r)
6931 return r;
6932 }
6933
6934 for (i = 0; i < 4; i++) {
6935 if (!(write_mask & (1<<i)))
6936 continue;
6937
6938 /* result = t1 >= 0 ? t2 : t1 */
6939 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6940 alu.op = ALU_OP3_CNDGE_INT;
6941 alu.is_op3 = 1;
6942 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6943 alu.dst.chan = i;
6944 alu.dst.write = 1;
6945 alu.last = i == last_inst;
6946
6947 alu.src[0].sel = t1;
6948 alu.src[0].chan = i;
6949 alu.src[1].sel = t2;
6950 alu.src[1].chan = i;
6951 alu.src[2].sel = t1;
6952 alu.src[2].chan = i;
6953
6954 r = r600_bytecode_add_alu(ctx->bc, &alu);
6955 if (r)
6956 return r;
6957 }
6958
6959 return 0;
6960 }
6961
6962 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
6963 {
6964 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6965 struct r600_bytecode_alu alu;
6966 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
6967 unsigned location;
6968 const int input = inst->Src[0].Register.Index + ctx->shader->nsys_inputs;
6969
6970 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
6971
6972 /* Interpolators have been marked for use already by allocate_system_value_inputs */
6973 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6974 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6975 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
6976 }
6977 else {
6978 location = TGSI_INTERPOLATE_LOC_CENTROID;
6979 }
6980
6981 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
6982 if (k < 0)
6983 k = 0;
6984 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
6985 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
6986
6987 /* NOTE: currently offset is not perspective correct */
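/* The offset/sample paths nudge the i/j barycentrics before
 * interpolating:
 *   ij' = ij + offset.x * dFdx(ij) + offset.y * dFdy(ij)
 * which is what the GET_GRADIENTS fetches plus the two MULADD loops
 * below compute. */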
6988 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6989 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6990 int sample_gpr = -1;
6991 int gradientsH, gradientsV;
6992 struct r600_bytecode_tex tex;
6993
6994 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6995 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
6996 }
6997
6998 gradientsH = r600_get_temp(ctx);
6999 gradientsV = r600_get_temp(ctx);
7000 for (i = 0; i < 2; i++) {
7001 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7002 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
7003 tex.src_gpr = interp_gpr;
7004 tex.src_sel_x = interp_base_chan + 0;
7005 tex.src_sel_y = interp_base_chan + 1;
7006 tex.src_sel_z = 0;
7007 tex.src_sel_w = 0;
7008 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
7009 tex.dst_sel_x = 0;
7010 tex.dst_sel_y = 1;
7011 tex.dst_sel_z = 7;
7012 tex.dst_sel_w = 7;
7013 tex.inst_mod = 1; // Use per pixel gradient calculation
7014 tex.sampler_id = 0;
7015 tex.resource_id = tex.sampler_id;
7016 r = r600_bytecode_add_tex(ctx->bc, &tex);
7017 if (r)
7018 return r;
7019 }
7020
7021 for (i = 0; i < 2; i++) {
7022 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7023 alu.op = ALU_OP3_MULADD;
7024 alu.is_op3 = 1;
7025 alu.src[0].sel = gradientsH;
7026 alu.src[0].chan = i;
7027 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7028 alu.src[1].sel = sample_gpr;
7029 alu.src[1].chan = 2;
7030 }
7031 else {
7032 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
7033 }
7034 alu.src[2].sel = interp_gpr;
7035 alu.src[2].chan = interp_base_chan + i;
7036 alu.dst.sel = ctx->temp_reg;
7037 alu.dst.chan = i;
7038 alu.last = i == 1;
7039
7040 r = r600_bytecode_add_alu(ctx->bc, &alu);
7041 if (r)
7042 return r;
7043 }
7044
7045 for (i = 0; i < 2; i++) {
7046 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7047 alu.op = ALU_OP3_MULADD;
7048 alu.is_op3 = 1;
7049 alu.src[0].sel = gradientsV;
7050 alu.src[0].chan = i;
7051 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7052 alu.src[1].sel = sample_gpr;
7053 alu.src[1].chan = 3;
7054 }
7055 else {
7056 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
7057 }
7058 alu.src[2].sel = ctx->temp_reg;
7059 alu.src[2].chan = i;
7060 alu.dst.sel = ctx->temp_reg;
7061 alu.dst.chan = i;
7062 alu.last = i == 1;
7063
7064 r = r600_bytecode_add_alu(ctx->bc, &alu);
7065 if (r)
7066 return r;
7067 }
7068 }
7069
7070 tmp = r600_get_temp(ctx);
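/* INTERP_ZW produces only the zw channels and INTERP_XY only xy, and
 * each consumes the i/j pair across a whole ALU group, so eight slots
 * are issued with dst.write set just for the four channels actually
 * produced (the forced bank swizzle presumably keeps the operands in
 * the slots the hardware expects). */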
7071 for (i = 0; i < 8; i++) {
7072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7073 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
7074
7075 alu.dst.sel = tmp;
7076 if ((i > 1 && i < 6)) {
7077 alu.dst.write = 1;
7078 }
7079 else {
7080 alu.dst.write = 0;
7081 }
7082 alu.dst.chan = i % 4;
7083
7084 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
7085 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
7086 alu.src[0].sel = ctx->temp_reg;
7087 alu.src[0].chan = 1 - (i % 2);
7088 } else {
7089 alu.src[0].sel = interp_gpr;
7090 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
7091 }
7092 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
7093 alu.src[1].chan = 0;
7094
7095 alu.last = i % 4 == 3;
7096 alu.bank_swizzle_force = SQ_ALU_VEC_210;
7097
7098 r = r600_bytecode_add_alu(ctx->bc, &alu);
7099 if (r)
7100 return r;
7101 }
7102
7103 // INTERP can't swizzle dst
7104 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7105 for (i = 0; i <= lasti; i++) {
7106 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7107 continue;
7108
7109 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7110 alu.op = ALU_OP1_MOV;
7111 alu.src[0].sel = tmp;
7112 alu.src[0].chan = ctx->src[0].swizzle[i];
7113 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7114 alu.dst.write = 1;
7115 alu.last = i == lasti;
7116 r = r600_bytecode_add_alu(ctx->bc, &alu);
7117 if (r)
7118 return r;
7119 }
7120
7121 return 0;
7122 }
7123
7124
7125 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
7126 {
7127 struct r600_bytecode_alu alu;
7128 int i, r;
7129
7130 for (i = 0; i < 4; i++) {
7131 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7132 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
7133 alu.op = ALU_OP0_NOP;
7134 alu.dst.chan = i;
7135 } else {
7136 alu.op = ALU_OP1_MOV;
7137 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7138 alu.src[0].sel = ctx->temp_reg;
7139 alu.src[0].chan = i;
7140 }
7141 if (i == 3) {
7142 alu.last = 1;
7143 }
7144 r = r600_bytecode_add_alu(ctx->bc, &alu);
7145 if (r)
7146 return r;
7147 }
7148 return 0;
7149 }
7150
7151 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
7152 unsigned writemask,
7153 struct r600_bytecode_alu_src *bc_src,
7154 const struct r600_shader_src *shader_src)
7155 {
7156 struct r600_bytecode_alu alu;
7157 int i, r;
7158 int lasti = tgsi_last_instruction(writemask);
7159 int temp_reg = 0;
7160
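/* Three-source ALU ops carry no absolute-value source modifier, so
 * when a source needs |x| it is copied through a plain MOV (which can
 * apply the modifier) into a temp and the op3 source is redirected
 * there. */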
7161 r600_bytecode_src(&bc_src[0], shader_src, 0);
7162 r600_bytecode_src(&bc_src[1], shader_src, 1);
7163 r600_bytecode_src(&bc_src[2], shader_src, 2);
7164 r600_bytecode_src(&bc_src[3], shader_src, 3);
7165
7166 if (bc_src->abs) {
7167 temp_reg = r600_get_temp(ctx);
7168
7169 for (i = 0; i < lasti + 1; i++) {
7170 if (!(writemask & (1 << i)))
7171 continue;
7172 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7173 alu.op = ALU_OP1_MOV;
7174 alu.dst.sel = temp_reg;
7175 alu.dst.chan = i;
7176 alu.dst.write = 1;
7177 alu.src[0] = bc_src[i];
7178 if (i == lasti) {
7179 alu.last = 1;
7180 }
7181 r = r600_bytecode_add_alu(ctx->bc, &alu);
7182 if (r)
7183 return r;
7184 memset(&bc_src[i], 0, sizeof(*bc_src));
7185 bc_src[i].sel = temp_reg;
7186 bc_src[i].chan = i;
7187 }
7188 }
7189 return 0;
7190 }
7191
7192 static int tgsi_op3_dst(struct r600_shader_ctx *ctx, int dst)
7193 {
7194 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7195 struct r600_bytecode_alu alu;
7196 struct r600_bytecode_alu_src srcs[4][4];
7197 int i, j, r;
7198 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7199 unsigned op = ctx->inst_info->op;
7200
7201 if (op == ALU_OP3_MULADD_IEEE &&
7202 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
7203 op = ALU_OP3_MULADD;
7204
7205 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7206 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
7207 srcs[j], &ctx->src[j]);
7208 if (r)
7209 return r;
7210 }
7211
7212 for (i = 0; i < lasti + 1; i++) {
7213 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7214 continue;
7215
7216 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7217 alu.op = op;
7218 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7219 alu.src[j] = srcs[j][i];
7220 }
7221
7222 if (dst == -1) {
7223 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7224 } else {
7225 alu.dst.sel = dst;
7226 }
7227 alu.dst.chan = i;
7228 alu.dst.write = 1;
7229 alu.is_op3 = 1;
7230 if (i == lasti) {
7231 alu.last = 1;
7232 }
7233 r = r600_bytecode_add_alu(ctx->bc, &alu);
7234 if (r)
7235 return r;
7236 }
7237 return 0;
7238 }
7239
7240 static int tgsi_op3(struct r600_shader_ctx *ctx)
7241 {
7242 return tgsi_op3_dst(ctx, -1);
7243 }
7244
7245 static int tgsi_dp(struct r600_shader_ctx *ctx)
7246 {
7247 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7248 struct r600_bytecode_alu alu;
7249 int i, j, r;
7250 unsigned op = ctx->inst_info->op;
7251 if (op == ALU_OP2_DOT4_IEEE &&
7252 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
7253 op = ALU_OP2_DOT4;
7254
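/* DOT4 always reduces all four lanes and broadcasts the result, so
 * DP2/DP3 are emitted as full DOT4s with the surplus source channels
 * forced to 0 in the switch below. */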
7255 for (i = 0; i < 4; i++) {
7256 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7257 alu.op = op;
7258 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
7259 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
7260 }
7261
7262 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7263 alu.dst.chan = i;
7264 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
7265 /* handle some special cases */
7266 switch (inst->Instruction.Opcode) {
7267 case TGSI_OPCODE_DP2:
7268 if (i > 1) {
7269 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
7270 alu.src[0].chan = alu.src[1].chan = 0;
7271 }
7272 break;
7273 case TGSI_OPCODE_DP3:
7274 if (i > 2) {
7275 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
7276 alu.src[0].chan = alu.src[1].chan = 0;
7277 }
7278 break;
7279 default:
7280 break;
7281 }
7282 if (i == 3) {
7283 alu.last = 1;
7284 }
7285 r = r600_bytecode_add_alu(ctx->bc, &alu);
7286 if (r)
7287 return r;
7288 }
7289 return 0;
7290 }
7291
7292 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
7293 unsigned index)
7294 {
7295 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7296 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
7297 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
7298 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
7299 ctx->src[index].neg || ctx->src[index].abs ||
7300 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == PIPE_SHADER_GEOMETRY);
7301 }
7302
7303 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
7304 unsigned index)
7305 {
7306 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7307 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
7308 }
7309
7310 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
7311 {
7312 struct r600_bytecode_vtx vtx;
7313 struct r600_bytecode_alu alu;
7314 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7315 int src_gpr, r, i;
7316 int id = tgsi_tex_get_src_gpr(ctx, 1);
7317 int sampler_index_mode = inst->Src[1].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7318
7319 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
7320 if (src_requires_loading) {
7321 for (i = 0; i < 4; i++) {
7322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7323 alu.op = ALU_OP1_MOV;
7324 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7325 alu.dst.sel = ctx->temp_reg;
7326 alu.dst.chan = i;
7327 if (i == 3)
7328 alu.last = 1;
7329 alu.dst.write = 1;
7330 r = r600_bytecode_add_alu(ctx->bc, &alu);
7331 if (r)
7332 return r;
7333 }
7334 src_gpr = ctx->temp_reg;
7335 }
7336
7337 memset(&vtx, 0, sizeof(vtx));
7338 vtx.op = FETCH_OP_VFETCH;
7339 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
7340 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
7341 vtx.src_gpr = src_gpr;
7342 vtx.mega_fetch_count = 16;
7343 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7344 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
7345 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
7346 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
7347 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
7348 vtx.use_const_fields = 1;
7349 vtx.buffer_index_mode = sampler_index_mode;
7350
7351 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
7352 return r;
7353
7354 if (ctx->bc->chip_class >= EVERGREEN)
7355 return 0;
7356
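/* Pre-Evergreen cannot apply the full buffer format in the fetch
 * itself, so the raw result is patched up with per-buffer constants:
 * AND each written channel with a component mask from the buffer-info
 * constant buffer, then OR channel 3 with a per-format constant
 * (presumably the fill value for formats without alpha). */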
7357 for (i = 0; i < 4; i++) {
7358 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7359 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7360 continue;
7361
7362 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7363 alu.op = ALU_OP2_AND_INT;
7364
7365 alu.dst.chan = i;
7366 alu.dst.sel = vtx.dst_gpr;
7367 alu.dst.write = 1;
7368
7369 alu.src[0].sel = vtx.dst_gpr;
7370 alu.src[0].chan = i;
7371
7372 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
7373 alu.src[1].sel += (id * 2);
7374 alu.src[1].chan = i % 4;
7375 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7376
7377 if (i == lasti)
7378 alu.last = 1;
7379 r = r600_bytecode_add_alu(ctx->bc, &alu);
7380 if (r)
7381 return r;
7382 }
7383
7384 if (inst->Dst[0].Register.WriteMask & 3) {
7385 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7386 alu.op = ALU_OP2_OR_INT;
7387
7388 alu.dst.chan = 3;
7389 alu.dst.sel = vtx.dst_gpr;
7390 alu.dst.write = 1;
7391
7392 alu.src[0].sel = vtx.dst_gpr;
7393 alu.src[0].chan = 3;
7394
7395 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
7396 alu.src[1].chan = 0;
7397 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7398
7399 alu.last = 1;
7400 r = r600_bytecode_add_alu(ctx->bc, &alu);
7401 if (r)
7402 return r;
7403 }
7404 return 0;
7405 }
7406
7407 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset, int eg_buffer_base)
7408 {
7409 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7410 int r;
7411 int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset;
7412 int sampler_index_mode = inst->Src[reg_idx].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7413
7414 if (ctx->bc->chip_class < EVERGREEN) {
7415 struct r600_bytecode_alu alu;
7416 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7417 alu.op = ALU_OP1_MOV;
7418 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
7419 /* on r600 the buffer sizes live in channel 1 of the second info dword */
7420 alu.src[0].sel += (id * 2) + 1;
7421 alu.src[0].chan = 1;
7422 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7423 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
7424 alu.last = 1;
7425 r = r600_bytecode_add_alu(ctx->bc, &alu);
7426 if (r)
7427 return r;
7428 return 0;
7429 } else {
7430 struct r600_bytecode_vtx vtx;
7431 memset(&vtx, 0, sizeof(vtx));
7432 vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
7433 vtx.buffer_id = id + eg_buffer_base;
7434 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
7435 vtx.src_gpr = 0;
7436 vtx.mega_fetch_count = 16; /* no idea here really... */
7437 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7438 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
7439 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 4 : 7; /* SEL_Y */
7440 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 4 : 7; /* SEL_Z */
7441 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 4 : 7; /* SEL_W */
7442 vtx.data_format = FMT_32_32_32_32;
7443 vtx.buffer_index_mode = sampler_index_mode;
7444
7445 if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
7446 return r;
7447 return 0;
7448 }
7449 }
7450
7451
7452 static int tgsi_tex(struct r600_shader_ctx *ctx)
7453 {
7454 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7455 struct r600_bytecode_tex tex;
7456 struct r600_bytecode_tex grad_offs[3];
7457 struct r600_bytecode_alu alu;
7458 unsigned src_gpr;
7459 int r, i, j, n_grad_offs = 0;
7460 int opcode;
7461 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
7462 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
7463 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
7464 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
7465
7466 bool txf_add_offsets = inst->Texture.NumOffsets &&
7467 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
7468 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
7469
7470 /* Texture fetch instructions can only use GPRs as sources,
7471 * and they cannot negate the source or take its absolute value. */
7472 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
7473 tgsi_tex_src_requires_loading(ctx, 0)) ||
7474 read_compressed_msaa || txf_add_offsets;
7475
7476 boolean src_loaded = FALSE;
7477 unsigned sampler_src_reg = 1;
7478 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
7479 boolean has_txq_cube_array_z = false;
7480 unsigned sampler_index_mode;
7481 int array_index_offset_channel = -1;
7482
7483 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
7484 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7485 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
7486 if (inst->Dst[0].Register.WriteMask & 4) {
7487 ctx->shader->has_txq_cube_array_z_comp = true;
7488 has_txq_cube_array_z = true;
7489 }
7490
7491 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
7492 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7493 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
7494 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
7495 sampler_src_reg = 2;
7496
7497 /* TGSI moves the sampler to src reg 3 for TXD */
7498 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
7499 sampler_src_reg = 3;
7500
7501 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7502
7503 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
7504
7505 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
7506 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
7507 if (ctx->bc->chip_class < EVERGREEN)
7508 ctx->shader->uses_tex_buffers = true;
7509 return r600_do_buffer_txq(ctx, 1, 0, R600_MAX_CONST_BUFFERS);
7510 }
7511 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
7512 if (ctx->bc->chip_class < EVERGREEN)
7513 ctx->shader->uses_tex_buffers = true;
7514 return do_vtx_fetch_inst(ctx, src_requires_loading);
7515 }
7516 }
7517
7518 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
7519 int out_chan;
7520 /* Add perspective divide */
7521 if (ctx->bc->chip_class == CAYMAN) {
7522 out_chan = 2;
7523 for (i = 0; i < 3; i++) {
7524 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7525 alu.op = ALU_OP1_RECIP_IEEE;
7526 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7527
7528 alu.dst.sel = ctx->temp_reg;
7529 alu.dst.chan = i;
7530 if (i == 2)
7531 alu.last = 1;
7532 if (out_chan == i)
7533 alu.dst.write = 1;
7534 r = r600_bytecode_add_alu(ctx->bc, &alu);
7535 if (r)
7536 return r;
7537 }
7538
7539 } else {
7540 out_chan = 3;
7541 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7542 alu.op = ALU_OP1_RECIP_IEEE;
7543 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7544
7545 alu.dst.sel = ctx->temp_reg;
7546 alu.dst.chan = out_chan;
7547 alu.last = 1;
7548 alu.dst.write = 1;
7549 r = r600_bytecode_add_alu(ctx->bc, &alu);
7550 if (r)
7551 return r;
7552 }
7553
7554 for (i = 0; i < 3; i++) {
7555 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7556 alu.op = ALU_OP2_MUL;
7557 alu.src[0].sel = ctx->temp_reg;
7558 alu.src[0].chan = out_chan;
7559 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
7560 alu.dst.sel = ctx->temp_reg;
7561 alu.dst.chan = i;
7562 alu.dst.write = 1;
7563 r = r600_bytecode_add_alu(ctx->bc, &alu);
7564 if (r)
7565 return r;
7566 }
7567 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7568 alu.op = ALU_OP1_MOV;
7569 alu.src[0].sel = V_SQ_ALU_SRC_1;
7570 alu.src[0].chan = 0;
7571 alu.dst.sel = ctx->temp_reg;
7572 alu.dst.chan = 3;
7573 alu.last = 1;
7574 alu.dst.write = 1;
7575 r = r600_bytecode_add_alu(ctx->bc, &alu);
7576 if (r)
7577 return r;
7578 src_loaded = TRUE;
7579 src_gpr = ctx->temp_reg;
7580 }
7581
7582
7583 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
7584 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7585 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7586 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7587 inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
7588
7589 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
7590 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
7591
7592 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
7593 for (i = 0; i < 4; i++) {
7594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7595 alu.op = ALU_OP2_CUBE;
7596 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
7597 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
7598 alu.dst.sel = ctx->temp_reg;
7599 alu.dst.chan = i;
7600 if (i == 3)
7601 alu.last = 1;
7602 alu.dst.write = 1;
7603 r = r600_bytecode_add_alu(ctx->bc, &alu);
7604 if (r)
7605 return r;
7606 }
7607
7608 /* tmp1.z = RCP_e(|tmp1.z|) */
7609 if (ctx->bc->chip_class == CAYMAN) {
7610 for (i = 0; i < 3; i++) {
7611 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7612 alu.op = ALU_OP1_RECIP_IEEE;
7613 alu.src[0].sel = ctx->temp_reg;
7614 alu.src[0].chan = 2;
7615 alu.src[0].abs = 1;
7616 alu.dst.sel = ctx->temp_reg;
7617 alu.dst.chan = i;
7618 if (i == 2)
7619 alu.dst.write = 1;
7620 if (i == 2)
7621 alu.last = 1;
7622 r = r600_bytecode_add_alu(ctx->bc, &alu);
7623 if (r)
7624 return r;
7625 }
7626 } else {
7627 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7628 alu.op = ALU_OP1_RECIP_IEEE;
7629 alu.src[0].sel = ctx->temp_reg;
7630 alu.src[0].chan = 2;
7631 alu.src[0].abs = 1;
7632 alu.dst.sel = ctx->temp_reg;
7633 alu.dst.chan = 2;
7634 alu.dst.write = 1;
7635 alu.last = 1;
7636 r = r600_bytecode_add_alu(ctx->bc, &alu);
7637 if (r)
7638 return r;
7639 }
7640
7641 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
7642 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
7643 * muladd has no writemask, have to use another temp
7644 */
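	/* Note on the 1.5 bias (inferred from this CUBE/MULADD sequence, not
	 * from hardware documentation): CUBE's Z output appears to be twice
	 * the major axis, so multiplying by RCP(|z|) maps the two face
	 * coordinates into [-0.5, 0.5], and adding 1.5 shifts them into the
	 * [1.0, 2.0] window the texture unit expects. */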
7645 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7646 alu.op = ALU_OP3_MULADD;
7647 alu.is_op3 = 1;
7648
7649 alu.src[0].sel = ctx->temp_reg;
7650 alu.src[0].chan = 0;
7651 alu.src[1].sel = ctx->temp_reg;
7652 alu.src[1].chan = 2;
7653
7654 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7655 alu.src[2].chan = 0;
7656 alu.src[2].value = u_bitcast_f2u(1.5f);
7657
7658 alu.dst.sel = ctx->temp_reg;
7659 alu.dst.chan = 0;
7660 alu.dst.write = 1;
7661
7662 r = r600_bytecode_add_alu(ctx->bc, &alu);
7663 if (r)
7664 return r;
7665
7666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7667 alu.op = ALU_OP3_MULADD;
7668 alu.is_op3 = 1;
7669
7670 alu.src[0].sel = ctx->temp_reg;
7671 alu.src[0].chan = 1;
7672 alu.src[1].sel = ctx->temp_reg;
7673 alu.src[1].chan = 2;
7674
7675 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7676 alu.src[2].chan = 0;
7677 alu.src[2].value = u_bitcast_f2u(1.5f);
7678
7679 alu.dst.sel = ctx->temp_reg;
7680 alu.dst.chan = 1;
7681 alu.dst.write = 1;
7682
7683 alu.last = 1;
7684 r = r600_bytecode_add_alu(ctx->bc, &alu);
7685 if (r)
7686 return r;
7687 		/* write the initial compare value into the Z component:
7688 		   - src0.w for shadow cube
7689 		   - src1.x for shadow cube array */
7690 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7691 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7692 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7693 alu.op = ALU_OP1_MOV;
7694 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
7695 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7696 else
7697 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7698 alu.dst.sel = ctx->temp_reg;
7699 alu.dst.chan = 2;
7700 alu.dst.write = 1;
7701 alu.last = 1;
7702 r = r600_bytecode_add_alu(ctx->bc, &alu);
7703 if (r)
7704 return r;
7705 }
7706
7707 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7708 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7709 if (ctx->bc->chip_class >= EVERGREEN) {
7710 int mytmp = r600_get_temp(ctx);
7711 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7712 alu.op = ALU_OP1_MOV;
7713 alu.src[0].sel = ctx->temp_reg;
7714 alu.src[0].chan = 3;
7715 alu.dst.sel = mytmp;
7716 alu.dst.chan = 0;
7717 alu.dst.write = 1;
7718 alu.last = 1;
7719 r = r600_bytecode_add_alu(ctx->bc, &alu);
7720 if (r)
7721 return r;
7722
7723 /* Evaluate the array index according to floor(idx + 0.5). This
7724 * needs to be done before merging the face select value, because
7725 * otherwise the fractional part of the array index will interfere
7726 * with the face select value */
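			/* Note: RNDNE rounds to nearest even, which matches
			 * floor(idx + 0.5) everywhere except at exact .5 ties,
			 * where RNDNE picks the nearest even integer. */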
7727 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7728 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7729 alu.op = ALU_OP1_RNDNE;
7730 alu.dst.sel = ctx->temp_reg;
7731 alu.dst.chan = 3;
7732 alu.dst.write = 1;
7733 alu.last = 1;
7734 r = r600_bytecode_add_alu(ctx->bc, &alu);
7735 if (r)
7736 return r;
7737
7738 /* Because the array slice index and the cube face index are merged
7739 * into one value we have to make sure the array slice index is >= 0,
7740 * otherwise the face selection will fail */
7741 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7742 alu.op = ALU_OP2_MAX;
7743 alu.src[0].sel = ctx->temp_reg;
7744 alu.src[0].chan = 3;
7745 alu.src[1].sel = V_SQ_ALU_SRC_0;
7746 alu.dst.sel = ctx->temp_reg;
7747 alu.dst.chan = 3;
7748 alu.dst.write = 1;
7749 alu.last = 1;
7750 r = r600_bytecode_add_alu(ctx->bc, &alu);
7751 if (r)
7752 return r;
7753
7754 			/* multiply the rounded layer index by 8 and add the face id
7755 			 * saved in mytmp.x; the merged value lands in temp.w and is
7756 			 * later routed into Z */
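			/* e.g. layer 2, face 3: 2 * 8.0 + 3 = 19, a combined slice
			 * index the sampler presumably decodes back into
			 * (layer, face). */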
7755 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7756 alu.op = ALU_OP3_MULADD;
7757 alu.is_op3 = 1;
7758 alu.src[0].sel = ctx->temp_reg;
7759 alu.src[0].chan = 3;
7760 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7761 alu.src[1].chan = 0;
7762 alu.src[1].value = u_bitcast_f2u(8.0f);
7763 alu.src[2].sel = mytmp;
7764 alu.src[2].chan = 0;
7765 alu.dst.sel = ctx->temp_reg;
7766 alu.dst.chan = 3;
7767 alu.dst.write = 1;
7768 alu.last = 1;
7769 r = r600_bytecode_add_alu(ctx->bc, &alu);
7770 if (r)
7771 return r;
7772 } else if (ctx->bc->chip_class < EVERGREEN) {
7773 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7774 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
7775 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7776 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7777 tex.src_gpr = r600_get_temp(ctx);
7778 tex.src_sel_x = 0;
7779 tex.src_sel_y = 0;
7780 tex.src_sel_z = 0;
7781 tex.src_sel_w = 0;
7782 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7783 tex.coord_type_x = 1;
7784 tex.coord_type_y = 1;
7785 tex.coord_type_z = 1;
7786 tex.coord_type_w = 1;
7787 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7788 alu.op = ALU_OP1_MOV;
7789 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7790 alu.dst.sel = tex.src_gpr;
7791 alu.dst.chan = 0;
7792 alu.last = 1;
7793 alu.dst.write = 1;
7794 r = r600_bytecode_add_alu(ctx->bc, &alu);
7795 if (r)
7796 return r;
7797
7798 r = r600_bytecode_add_tex(ctx->bc, &tex);
7799 if (r)
7800 return r;
7801 }
7802
7803 }
7804
7805 		/* for cube forms of lod and bias, route the value into temp.z so
7806 		 * it can be swizzled into W later */
7806 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
7807 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
7808 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7809 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
7810 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7811 alu.op = ALU_OP1_MOV;
7812 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7813 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
7814 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7815 else
7816 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7817 alu.dst.sel = ctx->temp_reg;
7818 alu.dst.chan = 2;
7819 alu.last = 1;
7820 alu.dst.write = 1;
7821 r = r600_bytecode_add_alu(ctx->bc, &alu);
7822 if (r)
7823 return r;
7824 }
7825
7826 src_loaded = TRUE;
7827 src_gpr = ctx->temp_reg;
7828 }
7829
7830 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
7831 int temp_h = 0, temp_v = 0;
7832 int start_val = 0;
7833
7834 		/* if we've already loaded the src (i.e. for CUBE), don't reload it */
7835 if (src_loaded == TRUE)
7836 start_val = 1;
7837 else
7838 src_loaded = TRUE;
7839 for (i = start_val; i < 3; i++) {
7840 int treg = r600_get_temp(ctx);
7841
7842 if (i == 0)
7843 src_gpr = treg;
7844 else if (i == 1)
7845 temp_h = treg;
7846 else
7847 temp_v = treg;
7848
7849 for (j = 0; j < 4; j++) {
7850 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7851 alu.op = ALU_OP1_MOV;
7852 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
7853 alu.dst.sel = treg;
7854 alu.dst.chan = j;
7855 if (j == 3)
7856 alu.last = 1;
7857 alu.dst.write = 1;
7858 r = r600_bytecode_add_alu(ctx->bc, &alu);
7859 if (r)
7860 return r;
7861 }
7862 }
7863 for (i = 1; i < 3; i++) {
7864 /* set gradients h/v */
7865 struct r600_bytecode_tex *t = &grad_offs[n_grad_offs++];
7866 memset(t, 0, sizeof(struct r600_bytecode_tex));
7867 t->op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
7868 FETCH_OP_SET_GRADIENTS_V;
7869 t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7870 t->sampler_index_mode = sampler_index_mode;
7871 t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
7872 t->resource_index_mode = sampler_index_mode;
7873
7874 t->src_gpr = (i == 1) ? temp_h : temp_v;
7875 t->src_sel_x = 0;
7876 t->src_sel_y = 1;
7877 t->src_sel_z = 2;
7878 t->src_sel_w = 3;
7879
7880 t->dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
7881 t->dst_sel_x = t->dst_sel_y = t->dst_sel_z = t->dst_sel_w = 7;
7882 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
7883 t->coord_type_x = 1;
7884 t->coord_type_y = 1;
7885 t->coord_type_z = 1;
7886 t->coord_type_w = 1;
7887 }
7888 }
7889 }
7890
7891 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
7892 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
7893 * incorrectly forces nearest filtering if the texture format is integer.
7894 * The only effect it has on Gather4, which always returns 4 texels for
7895 * bilinear filtering, is that the final coordinates are off by 0.5 of
7896 * the texel size.
7897 *
7898 * The workaround is to subtract 0.5 from the unnormalized coordinates,
7899 * or (0.5 / size) from the normalized coordinates.
7900 */
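		/* e.g. for a 256-texel axis the normalized correction computed
		 * below is -0.5 * (1.0 / 256) = -0.001953125, applied with a
		 * MULADD. */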
7901 if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
7902 inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
7903 int treg = r600_get_temp(ctx);
7904
7905 			/* move the array and comparison coordinates to temp_reg if needed */
7906 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7907 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7908 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) && !src_loaded) {
7909 int end = inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ? 3 : 2;
7910 for (i = 2; i <= end; i++) {
7911 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7912 alu.op = ALU_OP1_MOV;
7913 alu.dst.sel = ctx->temp_reg;
7914 alu.dst.chan = i;
7915 alu.dst.write = 1;
7916 alu.last = (i == end);
7917 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7918 r = r600_bytecode_add_alu(ctx->bc, &alu);
7919 if (r)
7920 return r;
7921 }
7922 }
7923
7924 if (inst->Texture.Texture == TGSI_TEXTURE_RECT ||
7925 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
7926 for (i = 0; i < 2; i++) {
7927 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7928 alu.op = ALU_OP2_ADD;
7929 alu.dst.sel = ctx->temp_reg;
7930 alu.dst.chan = i;
7931 alu.dst.write = 1;
7932 alu.last = i == 1;
7933 if (src_loaded) {
7934 alu.src[0].sel = ctx->temp_reg;
7935 alu.src[0].chan = i;
7936 } else
7937 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7938 alu.src[1].sel = V_SQ_ALU_SRC_0_5;
7939 alu.src[1].neg = 1;
7940 r = r600_bytecode_add_alu(ctx->bc, &alu);
7941 if (r)
7942 return r;
7943 }
7944 } else {
7945 /* execute a TXQ */
7946 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7947 tex.op = FETCH_OP_GET_TEXTURE_RESINFO;
7948 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7949 tex.sampler_index_mode = sampler_index_mode;
7950 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7951 tex.resource_index_mode = sampler_index_mode;
7952 tex.dst_gpr = treg;
7953 tex.src_sel_x = 4;
7954 tex.src_sel_y = 4;
7955 tex.src_sel_z = 4;
7956 tex.src_sel_w = 4;
7957 tex.dst_sel_x = 0;
7958 tex.dst_sel_y = 1;
7959 tex.dst_sel_z = 7;
7960 tex.dst_sel_w = 7;
7961 r = r600_bytecode_add_tex(ctx->bc, &tex);
7962 if (r)
7963 return r;
7964
7965 /* coord.xy = -0.5 * (1.0/int_to_flt(size)) + coord.xy */
7966 if (ctx->bc->chip_class == CAYMAN) {
7968 for (i = 0; i < 2; i++) {
7969 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7970 alu.op = ALU_OP1_INT_TO_FLT;
7971 alu.dst.sel = treg;
7972 alu.dst.chan = i;
7973 alu.dst.write = 1;
7974 alu.src[0].sel = treg;
7975 alu.src[0].chan = i;
7976 alu.last = (i == 1) ? 1 : 0;
7977 r = r600_bytecode_add_alu(ctx->bc, &alu);
7978 if (r)
7979 return r;
7980 }
7981 for (j = 0; j < 2; j++) {
7982 for (i = 0; i < 3; i++) {
7983 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7984 alu.op = ALU_OP1_RECIP_IEEE;
7985 alu.src[0].sel = treg;
7986 alu.src[0].chan = j;
7987 alu.dst.sel = treg;
7988 alu.dst.chan = i;
7989 if (i == 2)
7990 alu.last = 1;
7991 if (i == j)
7992 alu.dst.write = 1;
7993 r = r600_bytecode_add_alu(ctx->bc, &alu);
7994 if (r)
7995 return r;
7996 }
7997 }
7998 } else {
7999 for (i = 0; i < 2; i++) {
8000 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8001 alu.op = ALU_OP1_INT_TO_FLT;
8002 alu.dst.sel = treg;
8003 alu.dst.chan = i;
8004 alu.dst.write = 1;
8005 alu.src[0].sel = treg;
8006 alu.src[0].chan = i;
8007 alu.last = 1;
8008 r = r600_bytecode_add_alu(ctx->bc, &alu);
8009 if (r)
8010 return r;
8011 }
8012 for (i = 0; i < 2; i++) {
8013 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8014 alu.op = ALU_OP1_RECIP_IEEE;
8015 alu.src[0].sel = treg;
8016 alu.src[0].chan = i;
8017 alu.dst.sel = treg;
8018 alu.dst.chan = i;
8019 alu.last = 1;
8020 alu.dst.write = 1;
8021 r = r600_bytecode_add_alu(ctx->bc, &alu);
8022 if (r)
8023 return r;
8024 }
8025 }
8026 for (i = 0; i < 2; i++) {
8027 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8028 alu.op = ALU_OP3_MULADD;
8029 alu.is_op3 = 1;
8030 alu.dst.sel = ctx->temp_reg;
8031 alu.dst.chan = i;
8032 alu.dst.write = 1;
8033 alu.last = i == 1;
8034 alu.src[0].sel = treg;
8035 alu.src[0].chan = i;
8036 alu.src[1].sel = V_SQ_ALU_SRC_0_5;
8037 alu.src[1].neg = 1;
8038 if (src_loaded) {
8039 alu.src[2].sel = ctx->temp_reg;
8040 alu.src[2].chan = i;
8041 } else
8042 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
8043 r = r600_bytecode_add_alu(ctx->bc, &alu);
8044 if (r)
8045 return r;
8046 }
8047 }
8048 src_loaded = TRUE;
8049 src_gpr = ctx->temp_reg;
8050 }
8051 }
8052
8053 if (src_requires_loading && !src_loaded) {
8054 for (i = 0; i < 4; i++) {
8055 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8056 alu.op = ALU_OP1_MOV;
8057 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8058 alu.dst.sel = ctx->temp_reg;
8059 alu.dst.chan = i;
8060 if (i == 3)
8061 alu.last = 1;
8062 alu.dst.write = 1;
8063 r = r600_bytecode_add_alu(ctx->bc, &alu);
8064 if (r)
8065 return r;
8066 }
8067 src_loaded = TRUE;
8068 src_gpr = ctx->temp_reg;
8069 }
8070
8071 /* get offset values */
8072 if (inst->Texture.NumOffsets) {
8073 assert(inst->Texture.NumOffsets == 1);
8074
8075 /* The texture offset feature doesn't work with the TXF instruction
8076 * and must be emulated by adding the offset to the texture coordinates. */
8077 if (txf_add_offsets) {
8078 const struct tgsi_texture_offset *off = inst->TexOffsets;
8079
8080 switch (inst->Texture.Texture) {
8081 case TGSI_TEXTURE_3D:
8082 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8083 alu.op = ALU_OP2_ADD_INT;
8084 alu.src[0].sel = src_gpr;
8085 alu.src[0].chan = 2;
8086 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8087 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
8088 alu.dst.sel = src_gpr;
8089 alu.dst.chan = 2;
8090 alu.dst.write = 1;
8091 alu.last = 1;
8092 r = r600_bytecode_add_alu(ctx->bc, &alu);
8093 if (r)
8094 return r;
8095 /* fall through */
8096
8097 case TGSI_TEXTURE_2D:
8098 case TGSI_TEXTURE_SHADOW2D:
8099 case TGSI_TEXTURE_RECT:
8100 case TGSI_TEXTURE_SHADOWRECT:
8101 case TGSI_TEXTURE_2D_ARRAY:
8102 case TGSI_TEXTURE_SHADOW2D_ARRAY:
8103 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8104 alu.op = ALU_OP2_ADD_INT;
8105 alu.src[0].sel = src_gpr;
8106 alu.src[0].chan = 1;
8107 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8108 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
8109 alu.dst.sel = src_gpr;
8110 alu.dst.chan = 1;
8111 alu.dst.write = 1;
8112 alu.last = 1;
8113 r = r600_bytecode_add_alu(ctx->bc, &alu);
8114 if (r)
8115 return r;
8116 /* fall through */
8117
8118 case TGSI_TEXTURE_1D:
8119 case TGSI_TEXTURE_SHADOW1D:
8120 case TGSI_TEXTURE_1D_ARRAY:
8121 case TGSI_TEXTURE_SHADOW1D_ARRAY:
8122 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8123 alu.op = ALU_OP2_ADD_INT;
8124 alu.src[0].sel = src_gpr;
8125 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8126 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
8127 alu.dst.sel = src_gpr;
8128 alu.dst.write = 1;
8129 alu.last = 1;
8130 r = r600_bytecode_add_alu(ctx->bc, &alu);
8131 if (r)
8132 return r;
8133 break;
8134 /* texture offsets do not apply to other texture targets */
8135 }
8136 } else {
8137 switch (inst->Texture.Texture) {
8138 case TGSI_TEXTURE_3D:
8139 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
8140 /* fallthrough */
8141 case TGSI_TEXTURE_2D:
8142 case TGSI_TEXTURE_SHADOW2D:
8143 case TGSI_TEXTURE_RECT:
8144 case TGSI_TEXTURE_SHADOWRECT:
8145 case TGSI_TEXTURE_2D_ARRAY:
8146 case TGSI_TEXTURE_SHADOW2D_ARRAY:
8147 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
8148 /* fallthrough */
8149 case TGSI_TEXTURE_1D:
8150 case TGSI_TEXTURE_SHADOW1D:
8151 case TGSI_TEXTURE_1D_ARRAY:
8152 case TGSI_TEXTURE_SHADOW1D_ARRAY:
8153 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
8154 }
8155 }
8156 }
8157
8158 /* Obtain the sample index for reading a compressed MSAA color texture.
8159 * To read the FMASK, we use the ldfptr instruction, which tells us
8160 * where the samples are stored.
8161 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
8162 * which is the identity mapping. Each nibble says which physical sample
8163 * should be fetched to get that sample.
8164 *
8165 * Assume src.z contains the sample index. It should be modified like this:
8166 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
8167 * Then fetch the texel with src.
8168 */
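	/* Worked example: with the identity FMASK 0x76543210 and src.z = 2,
	 * (0x76543210 >> (2 * 4)) & 0xF = 2, i.e. physical sample 2 is
	 * fetched. */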
8169 if (read_compressed_msaa) {
8170 unsigned sample_chan = 3;
8171 unsigned temp = r600_get_temp(ctx);
8172 assert(src_loaded);
8173
8174 /* temp.w = ldfptr() */
8175 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8176 tex.op = FETCH_OP_LD;
8177 tex.inst_mod = 1; /* to indicate this is ldfptr */
8178 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8179 tex.sampler_index_mode = sampler_index_mode;
8180 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
8181 tex.resource_index_mode = sampler_index_mode;
8182 tex.src_gpr = src_gpr;
8183 tex.dst_gpr = temp;
8184 tex.dst_sel_x = 7; /* mask out these components */
8185 tex.dst_sel_y = 7;
8186 tex.dst_sel_z = 7;
8187 tex.dst_sel_w = 0; /* store X */
8188 tex.src_sel_x = 0;
8189 tex.src_sel_y = 1;
8190 tex.src_sel_z = 2;
8191 tex.src_sel_w = 3;
8192 tex.offset_x = offset_x;
8193 tex.offset_y = offset_y;
8194 tex.offset_z = offset_z;
8195 r = r600_bytecode_add_tex(ctx->bc, &tex);
8196 if (r)
8197 return r;
8198
8199 /* temp.x = sample_index*4 */
8200 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8201 alu.op = ALU_OP2_MULLO_INT;
8202 alu.src[0].sel = src_gpr;
8203 alu.src[0].chan = sample_chan;
8204 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8205 alu.src[1].value = 4;
8206 alu.dst.sel = temp;
8207 alu.dst.chan = 0;
8208 alu.dst.write = 1;
8209 r = emit_mul_int_op(ctx->bc, &alu);
8210 if (r)
8211 return r;
8212
8213 /* sample_index = temp.w >> temp.x */
8214 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8215 alu.op = ALU_OP2_LSHR_INT;
8216 alu.src[0].sel = temp;
8217 alu.src[0].chan = 3;
8218 alu.src[1].sel = temp;
8219 alu.src[1].chan = 0;
8220 alu.dst.sel = src_gpr;
8221 alu.dst.chan = sample_chan;
8222 alu.dst.write = 1;
8223 alu.last = 1;
8224 r = r600_bytecode_add_alu(ctx->bc, &alu);
8225 if (r)
8226 return r;
8227
8228 /* sample_index & 0xF */
8229 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8230 alu.op = ALU_OP2_AND_INT;
8231 alu.src[0].sel = src_gpr;
8232 alu.src[0].chan = sample_chan;
8233 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8234 alu.src[1].value = 0xF;
8235 alu.dst.sel = src_gpr;
8236 alu.dst.chan = sample_chan;
8237 alu.dst.write = 1;
8238 alu.last = 1;
8239 r = r600_bytecode_add_alu(ctx->bc, &alu);
8240 if (r)
8241 return r;
8242 #if 0
8243 /* visualize the FMASK */
8244 for (i = 0; i < 4; i++) {
8245 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8246 alu.op = ALU_OP1_INT_TO_FLT;
8247 alu.src[0].sel = src_gpr;
8248 alu.src[0].chan = sample_chan;
8249 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8250 alu.dst.chan = i;
8251 alu.dst.write = 1;
8252 alu.last = 1;
8253 r = r600_bytecode_add_alu(ctx->bc, &alu);
8254 if (r)
8255 return r;
8256 }
8257 return 0;
8258 #endif
8259 }
8260
8261 	/* does this shader want the number of layers from TXQ for a cube array? */
8262 if (has_txq_cube_array_z) {
8263 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8264
8265 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8266 alu.op = ALU_OP1_MOV;
8267
8268 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
8269 if (ctx->bc->chip_class >= EVERGREEN) {
8270 			/* on eg each dword holds the number of cubes */
8271 alu.src[0].sel += id / 4;
8272 alu.src[0].chan = id % 4;
8273 } else {
8274 			/* on r600 the value is at channel 2 of the second dword */
8275 alu.src[0].sel += (id * 2) + 1;
8276 alu.src[0].chan = 2;
8277 }
8278 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
8279 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
8280 alu.last = 1;
8281 r = r600_bytecode_add_alu(ctx->bc, &alu);
8282 if (r)
8283 return r;
8284 /* disable writemask from texture instruction */
8285 inst->Dst[0].Register.WriteMask &= ~4;
8286 }
8287
8288 opcode = ctx->inst_info->op;
8289 if (opcode == FETCH_OP_GATHER4 &&
8290 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
8291 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
8292 struct r600_bytecode_tex *t;
8293 opcode = FETCH_OP_GATHER4_O;
8294
8295 /* GATHER4_O/GATHER4_C_O use offset values loaded by
8296 SET_TEXTURE_OFFSETS instruction. The immediate offset values
8297 encoded in the instruction are ignored. */
8298 t = &grad_offs[n_grad_offs++];
8299 memset(t, 0, sizeof(struct r600_bytecode_tex));
8300 t->op = FETCH_OP_SET_TEXTURE_OFFSETS;
8301 t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8302 t->sampler_index_mode = sampler_index_mode;
8303 t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
8304 t->resource_index_mode = sampler_index_mode;
8305
8306 t->src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
8307 t->src_sel_x = inst->TexOffsets[0].SwizzleX;
8308 t->src_sel_y = inst->TexOffsets[0].SwizzleY;
8309 if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8310 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)
8311 			/* make sure the array index selector is 0; this is just a safety
8312 			 * precaution because TGSI seems to emit something strange here */
8313 t->src_sel_z = 4;
8314 else
8315 t->src_sel_z = inst->TexOffsets[0].SwizzleZ;
8316
8317 t->src_sel_w = 4;
8318
8319 t->dst_sel_x = 7;
8320 t->dst_sel_y = 7;
8321 t->dst_sel_z = 7;
8322 t->dst_sel_w = 7;
8323 }
8324
8325 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
8326 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
8327 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
8328 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
8329 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
8330 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
8331 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
8332 switch (opcode) {
8333 case FETCH_OP_SAMPLE:
8334 opcode = FETCH_OP_SAMPLE_C;
8335 break;
8336 case FETCH_OP_SAMPLE_L:
8337 opcode = FETCH_OP_SAMPLE_C_L;
8338 break;
8339 case FETCH_OP_SAMPLE_LB:
8340 opcode = FETCH_OP_SAMPLE_C_LB;
8341 break;
8342 case FETCH_OP_SAMPLE_G:
8343 opcode = FETCH_OP_SAMPLE_C_G;
8344 break;
8345 /* Texture gather variants */
8346 case FETCH_OP_GATHER4:
8347 opcode = FETCH_OP_GATHER4_C;
8348 break;
8349 case FETCH_OP_GATHER4_O:
8350 opcode = FETCH_OP_GATHER4_C_O;
8351 break;
8352 }
8353 }
8354
8355 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8356 tex.op = opcode;
8357
8358 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
8359 tex.sampler_index_mode = sampler_index_mode;
8360 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
8361 tex.resource_index_mode = sampler_index_mode;
8362 tex.src_gpr = src_gpr;
8363 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8364
8365 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
8366 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
8367 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
8368 }
8369
8370 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
8371 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
8372 tex.inst_mod = texture_component_select;
8373
8374 if (ctx->bc->chip_class == CAYMAN) {
8375 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8376 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8377 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8378 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8379 } else {
8380 /* GATHER4 result order is different from TGSI TG4 */
8381 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 1 : 7;
8382 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 2 : 7;
8383 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 0 : 7;
8384 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8385 }
8386 }
8387 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
8388 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8389 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8390 tex.dst_sel_z = 7;
8391 tex.dst_sel_w = 7;
8392 }
8393 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
8394 tex.dst_sel_x = 3;
8395 tex.dst_sel_y = 7;
8396 tex.dst_sel_z = 7;
8397 tex.dst_sel_w = 7;
8398 }
8399 else {
8400 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8401 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8402 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8403 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8404 }
8405
8406
8407 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
8408 tex.src_sel_x = 4;
8409 tex.src_sel_y = 4;
8410 tex.src_sel_z = 4;
8411 tex.src_sel_w = 4;
8412 } else if (src_loaded) {
8413 tex.src_sel_x = 0;
8414 tex.src_sel_y = 1;
8415 tex.src_sel_z = 2;
8416 tex.src_sel_w = 3;
8417 } else {
8418 tex.src_sel_x = ctx->src[0].swizzle[0];
8419 tex.src_sel_y = ctx->src[0].swizzle[1];
8420 tex.src_sel_z = ctx->src[0].swizzle[2];
8421 tex.src_sel_w = ctx->src[0].swizzle[3];
8422 tex.src_rel = ctx->src[0].rel;
8423 }
8424
8425 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
8426 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
8427 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
8428 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
8429 tex.src_sel_x = 1;
8430 tex.src_sel_y = 0;
8431 tex.src_sel_z = 3;
8432 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
8433 }
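	/* After the cube setup above, temp holds the two biased face
	 * coordinates in .xy, the compare or LOD value in .z, and the face id
	 * (plus 8 * layer for arrays) in .w; the swizzle hands them to the
	 * sampler as (y, x, w, z). */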
8434
8435 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
8436 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
8437 tex.coord_type_x = 1;
8438 tex.coord_type_y = 1;
8439 }
8440 tex.coord_type_z = 1;
8441 tex.coord_type_w = 1;
8442
8443 tex.offset_x = offset_x;
8444 tex.offset_y = offset_y;
8445 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
8446 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8447 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
8448 tex.offset_z = 0;
8449 }
8450 else {
8451 tex.offset_z = offset_z;
8452 }
8453
8454 /* Put the depth for comparison in W.
8455 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
8456 * Some instructions expect the depth in Z. */
8457 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
8458 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
8459 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
8460 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
8461 opcode != FETCH_OP_SAMPLE_C_L &&
8462 opcode != FETCH_OP_SAMPLE_C_LB) {
8463 tex.src_sel_w = tex.src_sel_z;
8464 }
8465
8466 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
8467 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
8468 if (opcode == FETCH_OP_SAMPLE_C_L ||
8469 opcode == FETCH_OP_SAMPLE_C_LB) {
8470 /* the array index is read from Y */
8471 tex.coord_type_y = 0;
8472 array_index_offset_channel = tex.src_sel_y;
8473 } else {
8474 /* the array index is read from Z */
8475 tex.coord_type_z = 0;
8476 tex.src_sel_z = tex.src_sel_y;
8477 array_index_offset_channel = tex.src_sel_z;
8478 }
8479 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
8480 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) {
8481 tex.coord_type_z = 0;
8482 array_index_offset_channel = tex.src_sel_z;
8483 } else if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
8484 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
8485 (ctx->bc->chip_class >= EVERGREEN))
8486 /* the array index is read from Z, coordinate will be corrected elsewhere */
8487 tex.coord_type_z = 0;
8488
8489 	/* We have array access to a 1D or 2D ARRAY texture and the coordinates
8490 	 * are floats, so round the array index to the nearest integer */
8491 if (array_index_offset_channel >= 0 &&
8492 opcode != FETCH_OP_LD &&
8493 opcode != FETCH_OP_GET_TEXTURE_RESINFO) {
8494 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8495 alu.src[0].sel = tex.src_gpr;
8496 alu.src[0].chan = array_index_offset_channel;
8497 alu.src[0].rel = tex.src_rel;
8498 alu.op = ALU_OP1_RNDNE;
8499 alu.dst.sel = tex.src_gpr;
8500 alu.dst.chan = array_index_offset_channel;
8501 alu.dst.rel = tex.src_rel;
8502 alu.dst.write = 1;
8503 alu.last = 1;
8504 r = r600_bytecode_add_alu(ctx->bc, &alu);
8505 if (r)
8506 return r;
8507 }
8508
8509 /* mask unused source components */
8510 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
8511 switch (inst->Texture.Texture) {
8512 case TGSI_TEXTURE_2D:
8513 case TGSI_TEXTURE_RECT:
8514 tex.src_sel_z = 7;
8515 tex.src_sel_w = 7;
8516 break;
8517 case TGSI_TEXTURE_1D_ARRAY:
8518 tex.src_sel_y = 7;
8519 tex.src_sel_w = 7;
8520 break;
8521 case TGSI_TEXTURE_1D:
8522 tex.src_sel_y = 7;
8523 tex.src_sel_z = 7;
8524 tex.src_sel_w = 7;
8525 break;
8526 }
8527 }
8528
8529 /* Emit set gradient and offset instructions. */
8530 for (i = 0; i < n_grad_offs; ++i) {
8531 r = r600_bytecode_add_tex(ctx->bc, &grad_offs[i]);
8532 if (r)
8533 return r;
8534 }
8535
8536 r = r600_bytecode_add_tex(ctx->bc, &tex);
8537 if (r)
8538 return r;
8539
8540 /* add shadow ambient support - gallium doesn't do it yet */
8541 return 0;
8542 }
8543
8544 static int find_hw_atomic_counter(struct r600_shader_ctx *ctx,
8545 struct tgsi_full_src_register *src)
8546 {
8547 unsigned i;
8548
8549 if (src->Register.Indirect) {
8550 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
8551 if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id)
8552 return ctx->shader->atomics[i].hw_idx;
8553 }
8554 } else {
8555 uint32_t index = src->Register.Index;
8556 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
8557 if (ctx->shader->atomics[i].buffer_id != (unsigned)src->Dimension.Index)
8558 continue;
8559 if (index > ctx->shader->atomics[i].end)
8560 continue;
8561 if (index < ctx->shader->atomics[i].start)
8562 continue;
8563 uint32_t offset = (index - ctx->shader->atomics[i].start);
8564 return ctx->shader->atomics[i].hw_idx + offset;
8565 }
8566 }
8567 assert(0);
8568 return -1;
8569 }
8570
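/* Compute the GDS addressing operands. On Cayman the byte offset
 * (uav_id * 4, plus any indirect index shifted left by 2) is staged in
 * temp_reg.x, since the uav_id/index_mode fields appear to be unused on
 * that chip; on evergreen those instruction fields are used directly,
 * with index_mode 2 selecting the CF index register for indirect
 * access. */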
8571 static int tgsi_set_gds_temp(struct r600_shader_ctx *ctx,
8572 int *uav_id_p, int *uav_index_mode_p)
8573 {
8574 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8575 int uav_id, uav_index_mode = 0;
8576 int r;
8577 bool is_cm = (ctx->bc->chip_class == CAYMAN);
8578
8579 uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]);
8580
8581 if (inst->Src[0].Register.Indirect) {
8582 if (is_cm) {
8583 struct r600_bytecode_alu alu;
8584 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8585 alu.op = ALU_OP2_LSHL_INT;
8586 alu.src[0].sel = get_address_file_reg(ctx, inst->Src[0].Indirect.Index);
8587 alu.src[0].chan = 0;
8588 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8589 alu.src[1].value = 2;
8590 alu.dst.sel = ctx->temp_reg;
8591 alu.dst.chan = 0;
8592 alu.dst.write = 1;
8593 alu.last = 1;
8594 r = r600_bytecode_add_alu(ctx->bc, &alu);
8595 if (r)
8596 return r;
8597
8598 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
8599 ctx->temp_reg, 0,
8600 ctx->temp_reg, 0,
8601 V_SQ_ALU_SRC_LITERAL, uav_id * 4);
8602 if (r)
8603 return r;
8604 } else
8605 uav_index_mode = 2;
8606 } else if (is_cm) {
8607 r = single_alu_op2(ctx, ALU_OP1_MOV,
8608 ctx->temp_reg, 0,
8609 V_SQ_ALU_SRC_LITERAL, uav_id * 4,
8610 0, 0);
8611 if (r)
8612 return r;
8613 }
8614 *uav_id_p = uav_id;
8615 *uav_index_mode_p = uav_index_mode;
8616 return 0;
8617 }
8618
8619 static int tgsi_load_gds(struct r600_shader_ctx *ctx)
8620 {
8621 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8622 int r;
8623 struct r600_bytecode_gds gds;
8624 int uav_id = 0;
8625 int uav_index_mode = 0;
8626 bool is_cm = (ctx->bc->chip_class == CAYMAN);
8627
8628 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
8629 if (r)
8630 return r;
8631
8632 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
8633 gds.op = FETCH_OP_GDS_READ_RET;
8634 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8635 gds.uav_id = is_cm ? 0 : uav_id;
8636 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
8637 gds.src_gpr = ctx->temp_reg;
8638 gds.src_sel_x = (is_cm) ? 0 : 4;
8639 gds.src_sel_y = 4;
8640 gds.src_sel_z = 4;
8641 gds.dst_sel_x = 0;
8642 gds.dst_sel_y = 7;
8643 gds.dst_sel_z = 7;
8644 gds.dst_sel_w = 7;
8645 gds.src_gpr2 = 0;
8646 gds.alloc_consume = !is_cm;
8647 r = r600_bytecode_add_gds(ctx->bc, &gds);
8648 if (r)
8649 return r;
8650
8651 ctx->bc->cf_last->vpm = 1;
8652 return 0;
8653 }
8654
8655 /* load the index source into a temp register, fixing up the coordinate
8656  * layout for each target (notably 1D arrays) */
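/* Resulting layout per target (taken from the switch below):
 * BUFFER/1D: (x, 0, 0, 0); 1D_ARRAY: (x, 0, layer, 0);
 * 2D: (x, y, 0, 0); otherwise: (x, y, z, 0). */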
8656 static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr)
8657 {
8658 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8659 int r, i;
8660 struct r600_bytecode_alu alu;
8661 int temp_reg = r600_get_temp(ctx);
8662
8663 for (i = 0; i < 4; i++) {
8664 bool def_val = true, write_zero = false;
8665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8666 alu.op = ALU_OP1_MOV;
8667 alu.dst.sel = temp_reg;
8668 alu.dst.chan = i;
8669
8670 switch (inst->Memory.Texture) {
8671 case TGSI_TEXTURE_BUFFER:
8672 case TGSI_TEXTURE_1D:
8673 if (i == 1 || i == 2 || i == 3) {
8674 write_zero = true;
8675 }
8676 break;
8677 case TGSI_TEXTURE_1D_ARRAY:
8678 if (i == 1 || i == 3)
8679 write_zero = true;
8680 else if (i == 2) {
8681 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1);
8682 def_val = false;
8683 }
8684 break;
8685 case TGSI_TEXTURE_2D:
8686 if (i == 2 || i == 3)
8687 write_zero = true;
8688 break;
8689 default:
8690 if (i == 3)
8691 write_zero = true;
8692 break;
8693 }
8694
8695 if (write_zero) {
8696 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
8697 alu.src[0].value = 0;
8698 } else if (def_val) {
8699 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i);
8700 }
8701
8702 if (i == 3)
8703 alu.last = 1;
8704 alu.dst.write = 1;
8705 r = r600_bytecode_add_alu(ctx->bc, &alu);
8706 if (r)
8707 return r;
8708 }
8709 *idx_gpr = temp_reg;
8710 return 0;
8711 }
8712
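/* Load a buffer coordinate, converting the byte offset in the source
 * into a dword element index (offset >> 2) for the following VFETCH. */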
8713 static int load_buffer_coord(struct r600_shader_ctx *ctx, int src_idx,
8714 int temp_reg)
8715 {
8716 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8717 int r;
8718 if (inst->Src[src_idx].Register.File == TGSI_FILE_IMMEDIATE) {
8719 int value = (ctx->literals[4 * inst->Src[src_idx].Register.Index + inst->Src[src_idx].Register.SwizzleX]);
8720 r = single_alu_op2(ctx, ALU_OP1_MOV,
8721 temp_reg, 0,
8722 V_SQ_ALU_SRC_LITERAL, value >> 2,
8723 0, 0);
8724 if (r)
8725 return r;
8726 } else {
8727 struct r600_bytecode_alu alu;
8728 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8729 alu.op = ALU_OP2_LSHR_INT;
8730 r600_bytecode_src(&alu.src[0], &ctx->src[src_idx], 0);
8731 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
8732 alu.src[1].value = 2;
8733 alu.dst.sel = temp_reg;
8734 alu.dst.write = 1;
8735 alu.last = 1;
8736 r = r600_bytecode_add_alu(ctx->bc, &alu);
8737 if (r)
8738 return r;
8739 }
8740 return 0;
8741 }
8742
8743 static int tgsi_load_buffer(struct r600_shader_ctx *ctx)
8744 {
8745 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8746 /* have to work out the offset into the RAT immediate return buffer */
8747 struct r600_bytecode_vtx vtx;
8748 struct r600_bytecode_cf *cf;
8749 int r;
8750 int temp_reg = r600_get_temp(ctx);
8751 unsigned rat_index_mode;
8752 unsigned base;
8753
8754 	rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
8755 base = R600_IMAGE_REAL_RESOURCE_OFFSET + ctx->info.file_count[TGSI_FILE_IMAGE];
8756
8757 r = load_buffer_coord(ctx, 1, temp_reg);
8758 if (r)
8759 return r;
8760 ctx->bc->cf_last->barrier = 1;
8761 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8762 vtx.op = FETCH_OP_VFETCH;
8763 vtx.buffer_id = inst->Src[0].Register.Index + base;
8764 vtx.buffer_index_mode = rat_index_mode;
8765 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8766 vtx.src_gpr = temp_reg;
8767 vtx.src_sel_x = 0;
8768 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8769 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
8770 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
8771 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
8772 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
8773 vtx.num_format_all = 1;
8774 vtx.format_comp_all = 1;
8775 vtx.srf_mode_all = 0;
8776
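	/* Pick the fetch format from the highest enabled destination
	 * component: w -> FMT_32_32_32_32, z -> FMT_32_32_32, y -> FMT_32_32,
	 * otherwise FMT_32. */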
8777 if (inst->Dst[0].Register.WriteMask & 8) {
8778 vtx.data_format = FMT_32_32_32_32;
8779 vtx.use_const_fields = 0;
8780 } else if (inst->Dst[0].Register.WriteMask & 4) {
8781 vtx.data_format = FMT_32_32_32;
8782 vtx.use_const_fields = 0;
8783 } else if (inst->Dst[0].Register.WriteMask & 2) {
8784 vtx.data_format = FMT_32_32;
8785 vtx.use_const_fields = 0;
8786 } else {
8787 vtx.data_format = FMT_32;
8788 vtx.use_const_fields = 0;
8789 }
8790
8791 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8792 if (r)
8793 return r;
8794 cf = ctx->bc->cf_last;
8795 cf->barrier = 1;
8796 return 0;
8797 }
8798
8799 static int tgsi_load_rat(struct r600_shader_ctx *ctx)
8800 {
8801 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8802 /* have to work out the offset into the RAT immediate return buffer */
8803 struct r600_bytecode_vtx vtx;
8804 struct r600_bytecode_cf *cf;
8805 int r;
8806 int idx_gpr;
8807 unsigned format, num_format, format_comp, endian;
8808 const struct util_format_description *desc;
8809 unsigned rat_index_mode;
8810 unsigned immed_base;
8811
8812 	rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
8813
8814 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
8815 r = load_index_src(ctx, 1, &idx_gpr);
8816 if (r)
8817 return r;
8818
8819 if (rat_index_mode)
8820 egcm_load_index_reg(ctx->bc, 1, false);
8821
8822 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8823 cf = ctx->bc->cf_last;
8824
8825 cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
8826 cf->rat.inst = V_RAT_INST_NOP_RTN;
8827 cf->rat.index_mode = rat_index_mode;
8828 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
8829 cf->output.gpr = ctx->thread_id_gpr;
8830 cf->output.index_gpr = idx_gpr;
8831 cf->output.comp_mask = 0xf;
8832 cf->output.burst_count = 1;
8833 cf->vpm = 1;
8834 cf->barrier = 1;
8835 cf->mark = 1;
8836 cf->output.elem_size = 0;
8837
8838 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
8839 cf = ctx->bc->cf_last;
8840 cf->barrier = 1;
8841
8842 desc = util_format_description(inst->Memory.Format);
8843 r600_vertex_data_type(inst->Memory.Format,
8844 &format, &num_format, &format_comp, &endian);
8845 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8846 vtx.op = FETCH_OP_VFETCH;
8847 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
8848 vtx.buffer_index_mode = rat_index_mode;
8849 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8850 vtx.src_gpr = ctx->thread_id_gpr;
8851 vtx.src_sel_x = 1;
8852 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8853 vtx.dst_sel_x = desc->swizzle[0];
8854 vtx.dst_sel_y = desc->swizzle[1];
8855 vtx.dst_sel_z = desc->swizzle[2];
8856 vtx.dst_sel_w = desc->swizzle[3];
8857 vtx.srf_mode_all = 1;
8858 vtx.data_format = format;
8859 vtx.num_format_all = num_format;
8860 vtx.format_comp_all = format_comp;
8861 vtx.endian = endian;
8862 vtx.offset = 0;
8863 vtx.mega_fetch_count = 3;
8864 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8865 if (r)
8866 return r;
8867 cf = ctx->bc->cf_last;
8868 cf->barrier = 1;
8869 return 0;
8870 }
8871
8872 static int tgsi_load_lds(struct r600_shader_ctx *ctx)
8873 {
8874 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8875 struct r600_bytecode_alu alu;
8876 int r;
8877 int temp_reg = r600_get_temp(ctx);
8878
8879 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8880 alu.op = ALU_OP1_MOV;
8881 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
8882 alu.dst.sel = temp_reg;
8883 alu.dst.write = 1;
8884 alu.last = 1;
8885 r = r600_bytecode_add_alu(ctx->bc, &alu);
8886 if (r)
8887 return r;
8888
8889 r = do_lds_fetch_values(ctx, temp_reg,
8890 ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index, inst->Dst[0].Register.WriteMask);
8891 if (r)
8892 return r;
8893 return 0;
8894 }
8895
8896 static int tgsi_load(struct r600_shader_ctx *ctx)
8897 {
8898 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8899 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
8900 return tgsi_load_rat(ctx);
8901 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
8902 return tgsi_load_gds(ctx);
8903 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
8904 return tgsi_load_buffer(ctx);
8905 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
8906 return tgsi_load_lds(ctx);
8907 return 0;
8908 }
8909
8910 static int tgsi_store_buffer_rat(struct r600_shader_ctx *ctx)
8911 {
8912 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8913 struct r600_bytecode_cf *cf;
8914 int r, i;
8915 unsigned rat_index_mode;
8916 int lasti;
8917 int temp_reg = r600_get_temp(ctx), treg2 = r600_get_temp(ctx);
8918
8919 r = load_buffer_coord(ctx, 0, treg2);
8920 if (r)
8921 return r;
8922
8923 	rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
8924 if (rat_index_mode)
8925 egcm_load_index_reg(ctx->bc, 1, false);
8926
8927 for (i = 0; i <= 3; i++) {
8928 struct r600_bytecode_alu alu;
8929 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8930 alu.op = ALU_OP1_MOV;
8931 alu.dst.sel = temp_reg;
8932 alu.dst.chan = i;
8933 alu.src[0].sel = V_SQ_ALU_SRC_0;
8934 alu.last = (i == 3);
8935 alu.dst.write = 1;
8936 r = r600_bytecode_add_alu(ctx->bc, &alu);
8937 if (r)
8938 return r;
8939 }
8940
8941 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8942 for (i = 0; i <= lasti; i++) {
8943 struct r600_bytecode_alu alu;
8944 if (!((1 << i) & inst->Dst[0].Register.WriteMask))
8945 continue;
8946
8947 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
8948 temp_reg, 0,
8949 treg2, 0,
8950 V_SQ_ALU_SRC_LITERAL, i);
8951 if (r)
8952 return r;
8953
8954 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8955 alu.op = ALU_OP1_MOV;
8956 alu.dst.sel = ctx->temp_reg;
8957 alu.dst.chan = 0;
8958
8959 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
8960 alu.last = 1;
8961 alu.dst.write = 1;
8962 r = r600_bytecode_add_alu(ctx->bc, &alu);
8963 if (r)
8964 return r;
8965
8966 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8967 cf = ctx->bc->cf_last;
8968
8969 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index + ctx->info.file_count[TGSI_FILE_IMAGE];
8970 cf->rat.inst = V_RAT_INST_STORE_TYPED;
8971 cf->rat.index_mode = rat_index_mode;
8972 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
8973 cf->output.gpr = ctx->temp_reg;
8974 cf->output.index_gpr = temp_reg;
8975 cf->output.comp_mask = 1;
8976 cf->output.burst_count = 1;
8977 cf->vpm = 1;
8978 cf->barrier = 1;
8979 cf->output.elem_size = 0;
8980 }
8981 return 0;
8982 }
8983
8984 static int tgsi_store_rat(struct r600_shader_ctx *ctx)
8985 {
8986 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8987 struct r600_bytecode_cf *cf;
8988 bool src_requires_loading = false;
8989 int val_gpr, idx_gpr;
8990 int r, i;
8991 unsigned rat_index_mode;
8992
8993 	rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
8994
8995 r = load_index_src(ctx, 0, &idx_gpr);
8996 if (r)
8997 return r;
8998
8999 if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY)
9000 src_requires_loading = true;
9001
9002 if (src_requires_loading) {
9003 struct r600_bytecode_alu alu;
9004 for (i = 0; i < 4; i++) {
9005 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9006 alu.op = ALU_OP1_MOV;
9007 alu.dst.sel = ctx->temp_reg;
9008 alu.dst.chan = i;
9009
9010 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
9011 if (i == 3)
9012 alu.last = 1;
9013 alu.dst.write = 1;
9014 r = r600_bytecode_add_alu(ctx->bc, &alu);
9015 if (r)
9016 return r;
9017 }
9018 val_gpr = ctx->temp_reg;
9019 } else
9020 val_gpr = tgsi_tex_get_src_gpr(ctx, 1);
9021 if (rat_index_mode)
9022 egcm_load_index_reg(ctx->bc, 1, false);
9023
9024 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
9025 cf = ctx->bc->cf_last;
9026
9027 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index;
9028 cf->rat.inst = V_RAT_INST_STORE_TYPED;
9029 cf->rat.index_mode = rat_index_mode;
9030 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
9031 cf->output.gpr = val_gpr;
9032 cf->output.index_gpr = idx_gpr;
9033 cf->output.comp_mask = 0xf;
9034 cf->output.burst_count = 1;
9035 cf->vpm = 1;
9036 cf->barrier = 1;
9037 cf->output.elem_size = 0;
9038 return 0;
9039 }
9040
9041 static int tgsi_store_lds(struct r600_shader_ctx *ctx)
9042 {
9043 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9044 struct r600_bytecode_alu alu;
9045 int r, i, lasti;
9046 int write_mask = inst->Dst[0].Register.WriteMask;
9047 int temp_reg = r600_get_temp(ctx);
9048
9049 /* LDS write */
9050 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9051 alu.op = ALU_OP1_MOV;
9052 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9053 alu.dst.sel = temp_reg;
9054 alu.dst.write = 1;
9055 alu.last = 1;
9056 r = r600_bytecode_add_alu(ctx->bc, &alu);
9057 if (r)
9058 return r;
9059
9060 lasti = tgsi_last_instruction(write_mask);
9061 for (i = 1; i <= lasti; i++) {
9062 if (!(write_mask & (1 << i)))
9063 continue;
9064 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
9065 temp_reg, i,
9066 temp_reg, 0,
9067 V_SQ_ALU_SRC_LITERAL, 4 * i);
9068 if (r)
9069 return r;
9070 }
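	/* Write pairs of consecutive enabled channels (xy or zw) with a
	 * single LDS_WRITE_REL, which stores two dwords; lds_idx = 1
	 * apparently supplies the +1 dword offset for the second value.
	 * Remaining channels fall back to scalar LDS_WRITE. */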
9071 for (i = 0; i <= lasti; i++) {
9072 if (!(write_mask & (1 << i)))
9073 continue;
9074
9075 if ((i == 0 && ((write_mask & 3) == 3)) ||
9076 (i == 2 && ((write_mask & 0xc) == 0xc))) {
9077 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9078 alu.op = LDS_OP3_LDS_WRITE_REL;
9079
9080 alu.src[0].sel = temp_reg;
9081 alu.src[0].chan = i;
9082 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9083 r600_bytecode_src(&alu.src[2], &ctx->src[1], i + 1);
9084 alu.last = 1;
9085 alu.is_lds_idx_op = true;
9086 alu.lds_idx = 1;
9087 r = r600_bytecode_add_alu(ctx->bc, &alu);
9088 if (r)
9089 return r;
9090 i += 1;
9091 continue;
9092 }
9093 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9094 alu.op = LDS_OP2_LDS_WRITE;
9095
9096 alu.src[0].sel = temp_reg;
9097 alu.src[0].chan = i;
9098 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9099
9100 alu.last = 1;
9101 alu.is_lds_idx_op = true;
9102
9103 r = r600_bytecode_add_alu(ctx->bc, &alu);
9104 if (r)
9105 return r;
9106 }
9107 return 0;
9108 }
9109
9110 static int tgsi_store(struct r600_shader_ctx *ctx)
9111 {
9112 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9113 if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER)
9114 return tgsi_store_buffer_rat(ctx);
9115 else if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY)
9116 return tgsi_store_lds(ctx);
9117 else
9118 return tgsi_store_rat(ctx);
9119 }
9120
9121 static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx)
9122 {
9123 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9124 /* have to work out the offset into the RAT immediate return buffer */
9125 struct r600_bytecode_alu alu;
9126 struct r600_bytecode_vtx vtx;
9127 struct r600_bytecode_cf *cf;
9128 int r;
9129 int idx_gpr;
9130 unsigned format, num_format, format_comp, endian;
9131 const struct util_format_description *desc;
9132 unsigned rat_index_mode;
9133 unsigned immed_base;
9134 unsigned rat_base;
9135
9136 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
9137 rat_base = ctx->shader->rat_base;
9138
9139 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
9140 immed_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9141 rat_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9142
9143 r = load_buffer_coord(ctx, 1, ctx->temp_reg);
9144 if (r)
9145 return r;
9146 idx_gpr = ctx->temp_reg;
9147 } else {
9148 r = load_index_src(ctx, 1, &idx_gpr);
9149 if (r)
9150 return r;
9151 }
9152
9153 	rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
9154
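	/* Stage the ALU operands for the RAT op in thread_id_gpr: for
	 * CMPXCHG the new value (src 3, per TGSI ATOMCAS operand order)
	 * goes to .x and the comparand (src 2) to .w (.z on Cayman); every
	 * other atomic takes its single operand (src 2) in .x. */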
9155 if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) {
9156 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9157 alu.op = ALU_OP1_MOV;
9158 alu.dst.sel = ctx->thread_id_gpr;
9159 alu.dst.chan = 0;
9160 alu.dst.write = 1;
9161 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
9162 alu.last = 1;
9163 r = r600_bytecode_add_alu(ctx->bc, &alu);
9164 if (r)
9165 return r;
9166
9167 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9168 alu.op = ALU_OP1_MOV;
9169 alu.dst.sel = ctx->thread_id_gpr;
9170 if (ctx->bc->chip_class == CAYMAN)
9171 alu.dst.chan = 2;
9172 else
9173 alu.dst.chan = 3;
9174 alu.dst.write = 1;
9175 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9176 alu.last = 1;
9177 r = r600_bytecode_add_alu(ctx->bc, &alu);
9178 if (r)
9179 return r;
9180 } else {
9181 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9182 alu.op = ALU_OP1_MOV;
9183 alu.dst.sel = ctx->thread_id_gpr;
9184 alu.dst.chan = 0;
9185 alu.dst.write = 1;
9186 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9187 alu.last = 1;
9188 r = r600_bytecode_add_alu(ctx->bc, &alu);
9189 if (r)
9190 return r;
9191 }
9192
9193 if (rat_index_mode)
9194 egcm_load_index_reg(ctx->bc, 1, false);
9195 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
9196 cf = ctx->bc->cf_last;
9197
9198 cf->rat.id = rat_base + inst->Src[0].Register.Index;
9199 cf->rat.inst = ctx->inst_info->op;
9200 cf->rat.index_mode = rat_index_mode;
9201 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
9202 cf->output.gpr = ctx->thread_id_gpr;
9203 cf->output.index_gpr = idx_gpr;
9204 cf->output.comp_mask = 0xf;
9205 cf->output.burst_count = 1;
9206 cf->vpm = 1;
9207 cf->barrier = 1;
9208 cf->mark = 1;
9209 cf->output.elem_size = 0;
9210 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
9211 cf = ctx->bc->cf_last;
9212 cf->barrier = 1;
9213 cf->cf_addr = 1;
9214
9215 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
9216 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
9217 desc = util_format_description(inst->Memory.Format);
9218 r600_vertex_data_type(inst->Memory.Format,
9219 &format, &num_format, &format_comp, &endian);
9220 vtx.dst_sel_x = desc->swizzle[0];
9221 } else {
9222 format = FMT_32;
9223 num_format = 1;
9224 format_comp = 0;
9225 endian = 0;
9226 vtx.dst_sel_x = 0;
9227 }
9228 vtx.op = FETCH_OP_VFETCH;
9229 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
9230 vtx.buffer_index_mode = rat_index_mode;
9231 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
9232 vtx.src_gpr = ctx->thread_id_gpr;
9233 vtx.src_sel_x = 1;
9234 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9235 vtx.dst_sel_y = 7;
9236 vtx.dst_sel_z = 7;
9237 vtx.dst_sel_w = 7;
9238 vtx.use_const_fields = 0;
9239 vtx.srf_mode_all = 1;
9240 vtx.data_format = format;
9241 vtx.num_format_all = num_format;
9242 vtx.format_comp_all = format_comp;
9243 vtx.endian = endian;
9244 vtx.offset = 0;
9245 vtx.mega_fetch_count = 0xf;
9246 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
9247 if (r)
9248 return r;
9249 cf = ctx->bc->cf_last;
9250 cf->vpm = 1;
9251 cf->barrier = 1;
9252 return 0;
9253 }
9254
9255 static int get_gds_op(int opcode)
9256 {
9257 switch (opcode) {
9258 case TGSI_OPCODE_ATOMUADD:
9259 return FETCH_OP_GDS_ADD_RET;
9260 case TGSI_OPCODE_ATOMAND:
9261 return FETCH_OP_GDS_AND_RET;
9262 case TGSI_OPCODE_ATOMOR:
9263 return FETCH_OP_GDS_OR_RET;
9264 case TGSI_OPCODE_ATOMXOR:
9265 return FETCH_OP_GDS_XOR_RET;
9266 case TGSI_OPCODE_ATOMUMIN:
9267 return FETCH_OP_GDS_MIN_UINT_RET;
9268 case TGSI_OPCODE_ATOMUMAX:
9269 return FETCH_OP_GDS_MAX_UINT_RET;
9270 case TGSI_OPCODE_ATOMXCHG:
9271 return FETCH_OP_GDS_XCHG_RET;
9272 case TGSI_OPCODE_ATOMCAS:
9273 return FETCH_OP_GDS_CMP_XCHG_RET;
9274 default:
9275 return -1;
9276 }
9277 }
9278
9279 static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx)
9280 {
9281 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9282 struct r600_bytecode_gds gds;
9283 struct r600_bytecode_alu alu;
9284 int gds_op = get_gds_op(inst->Instruction.Opcode);
9285 int r;
9286 int uav_id = 0;
9287 int uav_index_mode = 0;
9288 bool is_cm = (ctx->bc->chip_class == CAYMAN);
9289
9290 if (gds_op == -1) {
9291 fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode);
9292 return -1;
9293 }
9294
9295 r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode);
9296 if (r)
9297 return r;
9298
9299 if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET) {
9300 if (inst->Src[3].Register.File == TGSI_FILE_IMMEDIATE) {
9301 int value = (ctx->literals[4 * inst->Src[3].Register.Index + inst->Src[3].Register.SwizzleX]);
9302 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9303 alu.op = ALU_OP1_MOV;
9304 alu.dst.sel = ctx->temp_reg;
9305 alu.dst.chan = is_cm ? 2 : 1;
9306 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
9307 alu.src[0].value = value;
9308 alu.last = 1;
9309 alu.dst.write = 1;
9310 r = r600_bytecode_add_alu(ctx->bc, &alu);
9311 if (r)
9312 return r;
9313 } else {
9314 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9315 alu.op = ALU_OP1_MOV;
9316 alu.dst.sel = ctx->temp_reg;
9317 alu.dst.chan = is_cm ? 2 : 1;
9318 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
9319 alu.last = 1;
9320 alu.dst.write = 1;
9321 r = r600_bytecode_add_alu(ctx->bc, &alu);
9322 if (r)
9323 return r;
9324 }
9325 }
9326 if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) {
9327 int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]);
9328 int abs_value = abs(value);
9329 if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET)
9330 gds_op = FETCH_OP_GDS_SUB_RET;
9331 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9332 alu.op = ALU_OP1_MOV;
9333 alu.dst.sel = ctx->temp_reg;
9334 alu.dst.chan = is_cm ? 1 : 0;
9335 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
9336 alu.src[0].value = abs_value;
9337 alu.last = 1;
9338 alu.dst.write = 1;
9339 r = r600_bytecode_add_alu(ctx->bc, &alu);
9340 if (r)
9341 return r;
9342 } else {
9343 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9344 alu.op = ALU_OP1_MOV;
9345 alu.dst.sel = ctx->temp_reg;
9346 alu.dst.chan = is_cm ? 1 : 0;
9347 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
9348 alu.last = 1;
9349 alu.dst.write = 1;
9350 r = r600_bytecode_add_alu(ctx->bc, &alu);
9351 if (r)
9352 return r;
9353 }
9354
9355
9356 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
9357 gds.op = gds_op;
9358 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9359 gds.uav_id = is_cm ? 0 : uav_id;
9360 gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
9361 gds.src_gpr = ctx->temp_reg;
9362 gds.src_gpr2 = 0;
9363 gds.src_sel_x = is_cm ? 0 : 4;
9364 gds.src_sel_y = is_cm ? 1 : 0;
9365 if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET)
9366 gds.src_sel_z = is_cm ? 2 : 1;
9367 else
9368 gds.src_sel_z = 7;
9369 gds.dst_sel_x = 0;
9370 gds.dst_sel_y = 7;
9371 gds.dst_sel_z = 7;
9372 gds.dst_sel_w = 7;
9373 gds.alloc_consume = !is_cm;
9374
9375 r = r600_bytecode_add_gds(ctx->bc, &gds);
9376 if (r)
9377 return r;
9378 ctx->bc->cf_last->vpm = 1;
9379 return 0;
9380 }
9381
9382 static int get_lds_op(int opcode)
9383 {
9384 switch (opcode) {
9385 case TGSI_OPCODE_ATOMUADD:
9386 return LDS_OP2_LDS_ADD_RET;
9387 case TGSI_OPCODE_ATOMAND:
9388 return LDS_OP2_LDS_AND_RET;
9389 case TGSI_OPCODE_ATOMOR:
9390 return LDS_OP2_LDS_OR_RET;
9391 case TGSI_OPCODE_ATOMXOR:
9392 return LDS_OP2_LDS_XOR_RET;
9393 case TGSI_OPCODE_ATOMUMIN:
9394 return LDS_OP2_LDS_MIN_UINT_RET;
9395 case TGSI_OPCODE_ATOMUMAX:
9396 return LDS_OP2_LDS_MAX_UINT_RET;
9397 case TGSI_OPCODE_ATOMIMIN:
9398 return LDS_OP2_LDS_MIN_INT_RET;
9399 case TGSI_OPCODE_ATOMIMAX:
9400 return LDS_OP2_LDS_MAX_INT_RET;
9401 case TGSI_OPCODE_ATOMXCHG:
9402 return LDS_OP2_LDS_XCHG_RET;
9403 case TGSI_OPCODE_ATOMCAS:
9404 return LDS_OP3_LDS_CMP_XCHG_RET;
9405 default:
9406 return -1;
9407 }
9408 }
9409
9410 static int tgsi_atomic_op_lds(struct r600_shader_ctx *ctx)
9411 {
9412 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9413 int lds_op = get_lds_op(inst->Instruction.Opcode);
9414 int r;
9415
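	/* LDS atomics return their result through the LDS output queue:
	 * issue the indexed LDS op first, then pop the value from
	 * LDS_OQ_A_POP below */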
9416 struct r600_bytecode_alu alu;
9417 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9418 alu.op = lds_op;
9419 alu.is_lds_idx_op = true;
9420 alu.last = 1;
9421 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
9422 r600_bytecode_src(&alu.src[1], &ctx->src[2], 0);
9423 if (lds_op == LDS_OP3_LDS_CMP_XCHG_RET)
9424 r600_bytecode_src(&alu.src[2], &ctx->src[3], 0);
9425 else
9426 alu.src[2].sel = V_SQ_ALU_SRC_0;
9427 r = r600_bytecode_add_alu(ctx->bc, &alu);
9428 if (r)
9429 return r;
9430
9431 /* then read from LDS_OQ_A_POP */
9432 memset(&alu, 0, sizeof(alu));
9433
9434 alu.op = ALU_OP1_MOV;
9435 alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
9436 alu.src[0].chan = 0;
9437 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
9438 alu.dst.write = 1;
9439 alu.last = 1;
9440 r = r600_bytecode_add_alu(ctx->bc, &alu);
9441 if (r)
9442 return r;
9443
9444 return 0;
9445 }
9446
9447 static int tgsi_atomic_op(struct r600_shader_ctx *ctx)
9448 {
9449 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9450 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
9451 return tgsi_atomic_op_rat(ctx);
9452 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
9453 return tgsi_atomic_op_gds(ctx);
9454 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
9455 return tgsi_atomic_op_rat(ctx);
9456 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
9457 return tgsi_atomic_op_lds(ctx);
9458 return 0;
9459 }
9460
9461 static int tgsi_resq(struct r600_shader_ctx *ctx)
9462 {
9463 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9464 unsigned sampler_index_mode;
9465 struct r600_bytecode_tex tex;
9466 int r;
9467 boolean has_txq_cube_array_z = false;
9468
9469 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
9470 (inst->Src[0].Register.File == TGSI_FILE_IMAGE && inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
9471 if (ctx->bc->chip_class < EVERGREEN)
9472 ctx->shader->uses_tex_buffers = true;
9473 unsigned eg_buffer_base = R600_IMAGE_REAL_RESOURCE_OFFSET;
9475 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
9476 eg_buffer_base += ctx->info.file_count[TGSI_FILE_IMAGE];
9477 return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset, eg_buffer_base);
9478 }
9479
9480 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY &&
9481 inst->Dst[0].Register.WriteMask & 4) {
9482 ctx->shader->has_txq_cube_array_z_comp = true;
9483 has_txq_cube_array_z = true;
9484 }
9485
9486 sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
9487 if (sampler_index_mode)
9488 egcm_load_index_reg(ctx->bc, 1, false);
9489
9490
9491 /* does this shader want the number of layers from TXQ for a cube array? */
9492 if (has_txq_cube_array_z) {
9493 int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset;
9494 struct r600_bytecode_alu alu;
9495
9496 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9497 alu.op = ALU_OP1_MOV;
9498
9499 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
9500 /* with eg each dword holds the number of cubes */
9501 alu.src[0].sel += id / 4;
9502 alu.src[0].chan = id % 4;
9503 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
9504 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
9505 alu.last = 1;
9506 r = r600_bytecode_add_alu(ctx->bc, &alu);
9507 if (r)
9508 return r;
9509 /* disable writemask from texture instruction */
9510 inst->Dst[0].Register.WriteMask &= ~4;
9511 }
9512 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
9513 tex.op = ctx->inst_info->op;
9514 tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index;
9515 tex.sampler_index_mode = sampler_index_mode;
9516 tex.resource_id = tex.sampler_id;
9517 tex.resource_index_mode = sampler_index_mode;
9518 tex.src_sel_x = 4;
9519 tex.src_sel_y = 4;
9520 tex.src_sel_z = 4;
9521 tex.src_sel_w = 4;
9522 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
9523 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
9524 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
9525 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
9526 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
9527 r = r600_bytecode_add_tex(ctx->bc, &tex);
9528 if (r)
9529 return r;
9530
9531 return 0;
9532 }
9533
9534 static int tgsi_lrp(struct r600_shader_ctx *ctx)
9535 {
9536 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9537 struct r600_bytecode_alu alu;
9538 unsigned lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9539 struct r600_bytecode_alu_src srcs[2][4];
9540 unsigned i;
9541 int r;
9542
9543 /* optimize if it's just an equal balance */
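	/* ADD with omod 3 (output modifier: divide by 2) gives
	 * dst = (src1 + src2) * 0.5 in a single instruction */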
9544 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
9545 for (i = 0; i < lasti + 1; i++) {
9546 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9547 continue;
9548
9549 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9550 alu.op = ALU_OP2_ADD;
9551 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
9552 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9553 alu.omod = 3;
9554 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9555 alu.dst.chan = i;
9556 if (i == lasti) {
9557 alu.last = 1;
9558 }
9559 r = r600_bytecode_add_alu(ctx->bc, &alu);
9560 if (r)
9561 return r;
9562 }
9563 return 0;
9564 }
9565
9566 /* 1 - src0 */
9567 for (i = 0; i < lasti + 1; i++) {
9568 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9569 continue;
9570
9571 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9572 alu.op = ALU_OP2_ADD;
9573 alu.src[0].sel = V_SQ_ALU_SRC_1;
9574 alu.src[0].chan = 0;
9575 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
9576 r600_bytecode_src_toggle_neg(&alu.src[1]);
9577 alu.dst.sel = ctx->temp_reg;
9578 alu.dst.chan = i;
9579 if (i == lasti) {
9580 alu.last = 1;
9581 }
9582 alu.dst.write = 1;
9583 r = r600_bytecode_add_alu(ctx->bc, &alu);
9584 if (r)
9585 return r;
9586 }
9587
9588 /* (1 - src0) * src2 */
9589 for (i = 0; i < lasti + 1; i++) {
9590 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9591 continue;
9592
9593 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9594 alu.op = ALU_OP2_MUL;
9595 alu.src[0].sel = ctx->temp_reg;
9596 alu.src[0].chan = i;
9597 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9598 alu.dst.sel = ctx->temp_reg;
9599 alu.dst.chan = i;
9600 if (i == lasti) {
9601 alu.last = 1;
9602 }
9603 alu.dst.write = 1;
9604 r = r600_bytecode_add_alu(ctx->bc, &alu);
9605 if (r)
9606 return r;
9607 }
9608
9609 /* src0 * src1 + (1 - src0) * src2 */
9610
9611 for (i = 0; i < 2; i++) {
9612 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
9613 srcs[i], &ctx->src[i]);
9614 if (r)
9615 return r;
9616 }
9617
9618 for (i = 0; i < lasti + 1; i++) {
9619 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9620 continue;
9621
9622 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9623 alu.op = ALU_OP3_MULADD;
9624 alu.is_op3 = 1;
9625 alu.src[0] = srcs[0][i];
9626 alu.src[1] = srcs[1][i];
9627 alu.src[2].sel = ctx->temp_reg;
9628 alu.src[2].chan = i;
9629
9630 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9631 alu.dst.chan = i;
9632 if (i == lasti) {
9633 alu.last = 1;
9634 }
9635 r = r600_bytecode_add_alu(ctx->bc, &alu);
9636 if (r)
9637 return r;
9638 }
9639 return 0;
9640 }
9641
9642 static int tgsi_cmp(struct r600_shader_ctx *ctx)
9643 {
9644 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9645 struct r600_bytecode_alu alu;
9646 int i, r, j;
9647 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9648 struct r600_bytecode_alu_src srcs[3][4];
9649
9650 unsigned op;
9651
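	/* TGSI CMP: dst = (src0 < 0) ? src1 : src2. CNDGE selects its second
	 * operand when src0 >= 0, hence src1/src2 are swapped below. A source
	 * with both abs and neg set is -|x|, which is >= 0 only for x == 0,
	 * so it degenerates to CNDE on the raw value. */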
9652 if (ctx->src[0].abs && ctx->src[0].neg) {
9653 op = ALU_OP3_CNDE;
9654 ctx->src[0].abs = 0;
9655 ctx->src[0].neg = 0;
9656 } else {
9657 op = ALU_OP3_CNDGE;
9658 }
9659
9660 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
9661 r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
9662 srcs[j], &ctx->src[j]);
9663 if (r)
9664 return r;
9665 }
9666
9667 for (i = 0; i < lasti + 1; i++) {
9668 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9669 continue;
9670
9671 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9672 alu.op = op;
9673 alu.src[0] = srcs[0][i];
9674 alu.src[1] = srcs[2][i];
9675 alu.src[2] = srcs[1][i];
9676
9677 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9678 alu.dst.chan = i;
9679 alu.dst.write = 1;
9680 alu.is_op3 = 1;
9681 if (i == lasti)
9682 alu.last = 1;
9683 r = r600_bytecode_add_alu(ctx->bc, &alu);
9684 if (r)
9685 return r;
9686 }
9687 return 0;
9688 }
9689
9690 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
9691 {
9692 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9693 struct r600_bytecode_alu alu;
9694 int i, r;
9695 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9696
9697 for (i = 0; i < lasti + 1; i++) {
9698 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9699 continue;
9700
9701 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9702 alu.op = ALU_OP3_CNDE_INT;
9703 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
9704 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9705 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
9706 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9707 alu.dst.chan = i;
9708 alu.dst.write = 1;
9709 alu.is_op3 = 1;
9710 if (i == lasti)
9711 alu.last = 1;
9712 r = r600_bytecode_add_alu(ctx->bc, &alu);
9713 if (r)
9714 return r;
9715 }
9716 return 0;
9717 }
9718
9719 static int tgsi_exp(struct r600_shader_ctx *ctx)
9720 {
9721 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9722 struct r600_bytecode_alu alu;
9723 int r;
9724 unsigned i;
9725
9726 /* result.x = 2^floor(src); */
9727 if (inst->Dst[0].Register.WriteMask & 1) {
9728 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9729
9730 alu.op = ALU_OP1_FLOOR;
9731 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9732
9733 alu.dst.sel = ctx->temp_reg;
9734 alu.dst.chan = 0;
9735 alu.dst.write = 1;
9736 alu.last = 1;
9737 r = r600_bytecode_add_alu(ctx->bc, &alu);
9738 if (r)
9739 return r;
9740
9741 if (ctx->bc->chip_class == CAYMAN) {
9742 for (i = 0; i < 3; i++) {
9743 alu.op = ALU_OP1_EXP_IEEE;
9744 alu.src[0].sel = ctx->temp_reg;
9745 alu.src[0].chan = 0;
9746
9747 alu.dst.sel = ctx->temp_reg;
9748 alu.dst.chan = i;
9749 alu.dst.write = i == 0;
9750 alu.last = i == 2;
9751 r = r600_bytecode_add_alu(ctx->bc, &alu);
9752 if (r)
9753 return r;
9754 }
9755 } else {
9756 alu.op = ALU_OP1_EXP_IEEE;
9757 alu.src[0].sel = ctx->temp_reg;
9758 alu.src[0].chan = 0;
9759
9760 alu.dst.sel = ctx->temp_reg;
9761 alu.dst.chan = 0;
9762 alu.dst.write = 1;
9763 alu.last = 1;
9764 r = r600_bytecode_add_alu(ctx->bc, &alu);
9765 if (r)
9766 return r;
9767 }
9768 }
9769
9770 /* result.y = src.x - floor(src.x); */
9771 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9773
9774 alu.op = ALU_OP1_FRACT;
9775 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9776
9777 alu.dst.sel = ctx->temp_reg;
9778 #if 0
9779 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9780 if (r)
9781 return r;
9782 #endif
9783 alu.dst.write = 1;
9784 alu.dst.chan = 1;
9785
9786 alu.last = 1;
9787
9788 r = r600_bytecode_add_alu(ctx->bc, &alu);
9789 if (r)
9790 return r;
9791 }
9792
9793 /* result.z = RoughApprox2ToX(tmp);*/
9794 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
9795 if (ctx->bc->chip_class == CAYMAN) {
9796 for (i = 0; i < 3; i++) {
9797 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9798 alu.op = ALU_OP1_EXP_IEEE;
9799 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9800
9801 alu.dst.sel = ctx->temp_reg;
9802 alu.dst.chan = i;
9803 if (i == 2) {
9804 alu.dst.write = 1;
9805 alu.last = 1;
9806 }
9807
9808 r = r600_bytecode_add_alu(ctx->bc, &alu);
9809 if (r)
9810 return r;
9811 }
9812 } else {
9813 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9814 alu.op = ALU_OP1_EXP_IEEE;
9815 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9816
9817 alu.dst.sel = ctx->temp_reg;
9818 alu.dst.write = 1;
9819 alu.dst.chan = 2;
9820
9821 alu.last = 1;
9822
9823 r = r600_bytecode_add_alu(ctx->bc, &alu);
9824 if (r)
9825 return r;
9826 }
9827 }
9828
9829 /* result.w = 1.0;*/
9830 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
9831 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9832
9833 alu.op = ALU_OP1_MOV;
9834 alu.src[0].sel = V_SQ_ALU_SRC_1;
9835 alu.src[0].chan = 0;
9836
9837 alu.dst.sel = ctx->temp_reg;
9838 alu.dst.chan = 3;
9839 alu.dst.write = 1;
9840 alu.last = 1;
9841 r = r600_bytecode_add_alu(ctx->bc, &alu);
9842 if (r)
9843 return r;
9844 }
9845 return tgsi_helper_copy(ctx, inst);
9846 }
9847
9848 static int tgsi_log(struct r600_shader_ctx *ctx)
9849 {
9850 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9851 struct r600_bytecode_alu alu;
9852 int r;
9853 unsigned i;
9854
9855 /* result.x = floor(log2(|src|)); */
9856 if (inst->Dst[0].Register.WriteMask & 1) {
9857 if (ctx->bc->chip_class == CAYMAN) {
9858 for (i = 0; i < 3; i++) {
9859 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9860
9861 alu.op = ALU_OP1_LOG_IEEE;
9862 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9863 r600_bytecode_src_set_abs(&alu.src[0]);
9864
9865 alu.dst.sel = ctx->temp_reg;
9866 alu.dst.chan = i;
9867 if (i == 0)
9868 alu.dst.write = 1;
9869 if (i == 2)
9870 alu.last = 1;
9871 r = r600_bytecode_add_alu(ctx->bc, &alu);
9872 if (r)
9873 return r;
9874 }
9875
9876 } else {
9877 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9878
9879 alu.op = ALU_OP1_LOG_IEEE;
9880 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9881 r600_bytecode_src_set_abs(&alu.src[0]);
9882
9883 alu.dst.sel = ctx->temp_reg;
9884 alu.dst.chan = 0;
9885 alu.dst.write = 1;
9886 alu.last = 1;
9887 r = r600_bytecode_add_alu(ctx->bc, &alu);
9888 if (r)
9889 return r;
9890 }
9891
9892 alu.op = ALU_OP1_FLOOR;
9893 alu.src[0].sel = ctx->temp_reg;
9894 alu.src[0].chan = 0;
9895
9896 alu.dst.sel = ctx->temp_reg;
9897 alu.dst.chan = 0;
9898 alu.dst.write = 1;
9899 alu.last = 1;
9900
9901 r = r600_bytecode_add_alu(ctx->bc, &alu);
9902 if (r)
9903 return r;
9904 }
9905
9906 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
9907 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
9908
9909 if (ctx->bc->chip_class == CAYMAN) {
9910 for (i = 0; i < 3; i++) {
9911 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9912
9913 alu.op = ALU_OP1_LOG_IEEE;
9914 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9915 r600_bytecode_src_set_abs(&alu.src[0]);
9916
9917 alu.dst.sel = ctx->temp_reg;
9918 alu.dst.chan = i;
9919 if (i == 1)
9920 alu.dst.write = 1;
9921 if (i == 2)
9922 alu.last = 1;
9923
9924 r = r600_bytecode_add_alu(ctx->bc, &alu);
9925 if (r)
9926 return r;
9927 }
9928 } else {
9929 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9930
9931 alu.op = ALU_OP1_LOG_IEEE;
9932 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9933 r600_bytecode_src_set_abs(&alu.src[0]);
9934
9935 alu.dst.sel = ctx->temp_reg;
9936 alu.dst.chan = 1;
9937 alu.dst.write = 1;
9938 alu.last = 1;
9939
9940 r = r600_bytecode_add_alu(ctx->bc, &alu);
9941 if (r)
9942 return r;
9943 }
9944
9945 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9946
9947 alu.op = ALU_OP1_FLOOR;
9948 alu.src[0].sel = ctx->temp_reg;
9949 alu.src[0].chan = 1;
9950
9951 alu.dst.sel = ctx->temp_reg;
9952 alu.dst.chan = 1;
9953 alu.dst.write = 1;
9954 alu.last = 1;
9955
9956 r = r600_bytecode_add_alu(ctx->bc, &alu);
9957 if (r)
9958 return r;
9959
9960 if (ctx->bc->chip_class == CAYMAN) {
9961 for (i = 0; i < 3; i++) {
9962 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9963 alu.op = ALU_OP1_EXP_IEEE;
9964 alu.src[0].sel = ctx->temp_reg;
9965 alu.src[0].chan = 1;
9966
9967 alu.dst.sel = ctx->temp_reg;
9968 alu.dst.chan = i;
9969 if (i == 1)
9970 alu.dst.write = 1;
9971 if (i == 2)
9972 alu.last = 1;
9973
9974 r = r600_bytecode_add_alu(ctx->bc, &alu);
9975 if (r)
9976 return r;
9977 }
9978 } else {
9979 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9980 alu.op = ALU_OP1_EXP_IEEE;
9981 alu.src[0].sel = ctx->temp_reg;
9982 alu.src[0].chan = 1;
9983
9984 alu.dst.sel = ctx->temp_reg;
9985 alu.dst.chan = 1;
9986 alu.dst.write = 1;
9987 alu.last = 1;
9988
9989 r = r600_bytecode_add_alu(ctx->bc, &alu);
9990 if (r)
9991 return r;
9992 }
9993
9994 if (ctx->bc->chip_class == CAYMAN) {
9995 for (i = 0; i < 3; i++) {
9996 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9997 alu.op = ALU_OP1_RECIP_IEEE;
9998 alu.src[0].sel = ctx->temp_reg;
9999 alu.src[0].chan = 1;
10000
10001 alu.dst.sel = ctx->temp_reg;
10002 alu.dst.chan = i;
10003 if (i == 1)
10004 alu.dst.write = 1;
10005 if (i == 2)
10006 alu.last = 1;
10007
10008 r = r600_bytecode_add_alu(ctx->bc, &alu);
10009 if (r)
10010 return r;
10011 }
10012 } else {
10013 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10014 alu.op = ALU_OP1_RECIP_IEEE;
10015 alu.src[0].sel = ctx->temp_reg;
10016 alu.src[0].chan = 1;
10017
10018 alu.dst.sel = ctx->temp_reg;
10019 alu.dst.chan = 1;
10020 alu.dst.write = 1;
10021 alu.last = 1;
10022
10023 r = r600_bytecode_add_alu(ctx->bc, &alu);
10024 if (r)
10025 return r;
10026 }
10027
10028 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10029
10030 alu.op = ALU_OP2_MUL;
10031
10032 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10033 r600_bytecode_src_set_abs(&alu.src[0]);
10034
10035 alu.src[1].sel = ctx->temp_reg;
10036 alu.src[1].chan = 1;
10037
10038 alu.dst.sel = ctx->temp_reg;
10039 alu.dst.chan = 1;
10040 alu.dst.write = 1;
10041 alu.last = 1;
10042
10043 r = r600_bytecode_add_alu(ctx->bc, &alu);
10044 if (r)
10045 return r;
10046 }
10047
10048 /* result.z = log2(|src|);*/
10049 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
10050 if (ctx->bc->chip_class == CAYMAN) {
10051 for (i = 0; i < 3; i++) {
10052 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10053
10054 alu.op = ALU_OP1_LOG_IEEE;
10055 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10056 r600_bytecode_src_set_abs(&alu.src[0]);
10057
10058 alu.dst.sel = ctx->temp_reg;
10059 if (i == 2)
10060 alu.dst.write = 1;
10061 alu.dst.chan = i;
10062 if (i == 2)
10063 alu.last = 1;
10064
10065 r = r600_bytecode_add_alu(ctx->bc, &alu);
10066 if (r)
10067 return r;
10068 }
10069 } else {
10070 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10071
10072 alu.op = ALU_OP1_LOG_IEEE;
10073 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10074 r600_bytecode_src_set_abs(&alu.src[0]);
10075
10076 alu.dst.sel = ctx->temp_reg;
10077 alu.dst.write = 1;
10078 alu.dst.chan = 2;
10079 alu.last = 1;
10080
10081 r = r600_bytecode_add_alu(ctx->bc, &alu);
10082 if (r)
10083 return r;
10084 }
10085 }
10086
10087 /* result.w = 1.0; */
10088 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
10089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10090
10091 alu.op = ALU_OP1_MOV;
10092 alu.src[0].sel = V_SQ_ALU_SRC_1;
10093 alu.src[0].chan = 0;
10094
10095 alu.dst.sel = ctx->temp_reg;
10096 alu.dst.chan = 3;
10097 alu.dst.write = 1;
10098 alu.last = 1;
10099
10100 r = r600_bytecode_add_alu(ctx->bc, &alu);
10101 if (r)
10102 return r;
10103 }
10104
10105 return tgsi_helper_copy(ctx, inst);
10106 }
10107
10108 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
10109 {
10110 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10111 struct r600_bytecode_alu alu;
10112 int r;
10113 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10114 unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
10115
10116 assert(inst->Dst[0].Register.Index < 3);
10117 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10118
10119 switch (inst->Instruction.Opcode) {
10120 case TGSI_OPCODE_ARL:
10121 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
10122 break;
10123 case TGSI_OPCODE_ARR:
10124 alu.op = ALU_OP1_FLT_TO_INT;
10125 break;
10126 case TGSI_OPCODE_UARL:
10127 alu.op = ALU_OP1_MOV;
10128 break;
10129 default:
10130 assert(0);
10131 return -1;
10132 }
10133
10134 for (i = 0; i <= lasti; ++i) {
10135 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10136 continue;
10137 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10138 alu.last = i == lasti;
10139 alu.dst.sel = reg;
10140 alu.dst.chan = i;
10141 alu.dst.write = 1;
10142 r = r600_bytecode_add_alu(ctx->bc, &alu);
10143 if (r)
10144 return r;
10145 }
10146
10147 if (inst->Dst[0].Register.Index > 0)
10148 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
10149 else
10150 ctx->bc->ar_loaded = 0;
10151
10152 return 0;
10153 }
10154 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
10155 {
10156 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10157 struct r600_bytecode_alu alu;
10158 int r;
10159 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10160
10161 switch (inst->Instruction.Opcode) {
10162 case TGSI_OPCODE_ARL:
10163 memset(&alu, 0, sizeof(alu));
10164 alu.op = ALU_OP1_FLOOR;
10165 alu.dst.sel = ctx->bc->ar_reg;
10166 alu.dst.write = 1;
10167 for (i = 0; i <= lasti; ++i) {
10168 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10169 alu.dst.chan = i;
10170 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10171 alu.last = i == lasti;
10172 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10173 return r;
10174 }
10175 }
10176
10177 memset(&alu, 0, sizeof(alu));
10178 alu.op = ALU_OP1_FLT_TO_INT;
10179 alu.src[0].sel = ctx->bc->ar_reg;
10180 alu.dst.sel = ctx->bc->ar_reg;
10181 alu.dst.write = 1;
10182 /* FLT_TO_INT is trans-only on r600/r700 */
10183 alu.last = TRUE;
10184 for (i = 0; i <= lasti; ++i) {
10185 alu.dst.chan = i;
10186 alu.src[0].chan = i;
10187 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10188 return r;
10189 }
10190 break;
10191 case TGSI_OPCODE_ARR:
10192 memset(&alu, 0, sizeof(alu));
10193 alu.op = ALU_OP1_FLT_TO_INT;
10194 alu.dst.sel = ctx->bc->ar_reg;
10195 alu.dst.write = 1;
10196 /* FLT_TO_INT is trans-only on r600/r700 */
10197 alu.last = TRUE;
10198 for (i = 0; i <= lasti; ++i) {
10199 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10200 alu.dst.chan = i;
10201 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10202 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10203 return r;
10204 }
10205 }
10206 break;
10207 case TGSI_OPCODE_UARL:
10208 memset(&alu, 0, sizeof(alu));
10209 alu.op = ALU_OP1_MOV;
10210 alu.dst.sel = ctx->bc->ar_reg;
10211 alu.dst.write = 1;
10212 for (i = 0; i <= lasti; ++i) {
10213 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
10214 alu.dst.chan = i;
10215 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10216 alu.last = i == lasti;
10217 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
10218 return r;
10219 }
10220 }
10221 break;
10222 default:
10223 assert(0);
10224 return -1;
10225 }
10226
10227 ctx->bc->ar_loaded = 0;
10228 return 0;
10229 }
10230
10231 static int tgsi_opdst(struct r600_shader_ctx *ctx)
10232 {
10233 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10234 struct r600_bytecode_alu alu;
10235 int i, r = 0;
10236
10237 for (i = 0; i < 4; i++) {
10238 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10239
10240 alu.op = ALU_OP2_MUL;
10241 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10242
10243 if (i == 0 || i == 3) {
10244 alu.src[0].sel = V_SQ_ALU_SRC_1;
10245 } else {
10246 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
10247 }
10248
10249 if (i == 0 || i == 2) {
10250 alu.src[1].sel = V_SQ_ALU_SRC_1;
10251 } else {
10252 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
10253 }
10254 if (i == 3)
10255 alu.last = 1;
10256 r = r600_bytecode_add_alu(ctx->bc, &alu);
10257 if (r)
10258 return r;
10259 }
10260 return 0;
10261 }
10262
10263 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type,
10264 struct r600_bytecode_alu_src *src)
10265 {
10266 struct r600_bytecode_alu alu;
10267 int r;
10268
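	/* predicated compare of src against 0: execute_mask and update_pred
	 * make this ALU op gate the CF_OP_JUMP emitted by the caller */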
10269 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10270 alu.op = opcode;
10271 alu.execute_mask = 1;
10272 alu.update_pred = 1;
10273
10274 alu.dst.sel = ctx->temp_reg;
10275 alu.dst.write = 1;
10276 alu.dst.chan = 0;
10277
10278 alu.src[0] = *src;
10279 alu.src[1].sel = V_SQ_ALU_SRC_0;
10280 alu.src[1].chan = 0;
10281
10282 alu.last = 1;
10283
10284 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
10285 if (r)
10286 return r;
10287 return 0;
10288 }
10289
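/* Emit 'pops' stack pops. When possible they are folded into the preceding
 * ALU clause (ALU -> ALU_POP_AFTER, ALU_POP_AFTER -> ALU_POP2_AFTER);
 * deeper pops fall back to an explicit CF_OP_POP. */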
10290 static int pops(struct r600_shader_ctx *ctx, int pops)
10291 {
10292 unsigned force_pop = ctx->bc->force_add_cf;
10293
10294 if (!force_pop) {
10295 int alu_pop = 3;
10296 if (ctx->bc->cf_last) {
10297 if (ctx->bc->cf_last->op == CF_OP_ALU)
10298 alu_pop = 0;
10299 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
10300 alu_pop = 1;
10301 }
10302 alu_pop += pops;
10303 if (alu_pop == 1) {
10304 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
10305 ctx->bc->force_add_cf = 1;
10306 } else if (alu_pop == 2) {
10307 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
10308 ctx->bc->force_add_cf = 1;
10309 } else {
10310 force_pop = 1;
10311 }
10312 }
10313
10314 if (force_pop) {
10315 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
10316 ctx->bc->cf_last->pop_count = pops;
10317 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
10318 }
10319
10320 return 0;
10321 }
10322
10323 static inline int callstack_update_max_depth(struct r600_shader_ctx *ctx,
10324 unsigned reason)
10325 {
10326 struct r600_stack_info *stack = &ctx->bc->stack;
10327 unsigned elements;
10328 int entries;
10329
10330 unsigned entry_size = stack->entry_size;
10331
10332 elements = (stack->loop + stack->push_wqm) * entry_size;
10333 elements += stack->push;
10334
10335 switch (ctx->bc->chip_class) {
10336 case R600:
10337 case R700:
10338 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
10339 * the stack must be reserved to hold the current active/continue
10340 * masks */
10341 if (reason == FC_PUSH_VPM || stack->push > 0) {
10342 elements += 2;
10343 }
10344 break;
10345
10346 case CAYMAN:
10347 /* r9xx: any stack operation on empty stack consumes 2 additional
10348 * elements */
10349 elements += 2;
10350
10351 /* fallthrough */
10352 /* FIXME: do the two elements added above cover the cases for the
10353 * r8xx+ below? */
10354
10355 case EVERGREEN:
10356 /* r8xx+: 2 extra elements are not always required, but one extra
10357 * element must be added for each of the following cases:
10358 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
10359 * stack usage.
10360 * (Currently we don't use ALU_ELSE_AFTER.)
10361 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
10362 * PUSH instruction executed.
10363 *
10364 * NOTE: it seems we also need to reserve additional element in some
10365 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
10366 * then STACK_SIZE should be 2 instead of 1 */
10367 if (reason == FC_PUSH_VPM || stack->push > 0) {
10368 elements += 1;
10369 }
10370 break;
10371
10372 default:
10373 assert(0);
10374 break;
10375 }
10376
10377 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
10378 * for all chips, so we use 4 in the final formula, not the real entry_size
10379 * for the chip */
10380 entry_size = 4;
10381
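	/* round up: entries = ceil(elements / entry_size) */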
10382 entries = (elements + (entry_size - 1)) / entry_size;
10383
10384 if (entries > stack->max_entries)
10385 stack->max_entries = entries;
10386 return elements;
10387 }
10388
10389 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
10390 {
10391 switch(reason) {
10392 case FC_PUSH_VPM:
10393 --ctx->bc->stack.push;
10394 assert(ctx->bc->stack.push >= 0);
10395 break;
10396 case FC_PUSH_WQM:
10397 --ctx->bc->stack.push_wqm;
10398 assert(ctx->bc->stack.push_wqm >= 0);
10399 break;
10400 case FC_LOOP:
10401 --ctx->bc->stack.loop;
10402 assert(ctx->bc->stack.loop >= 0);
10403 break;
10404 default:
10405 assert(0);
10406 break;
10407 }
10408 }
10409
10410 static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
10411 {
10412 switch (reason) {
10413 case FC_PUSH_VPM:
10414 ++ctx->bc->stack.push;
10415 break;
10416 case FC_PUSH_WQM:
10417 ++ctx->bc->stack.push_wqm;
10418 break;
10419 case FC_LOOP:
10420 ++ctx->bc->stack.loop;
10421 break;
10422 default:
10423 assert(0);
10424 }
10425
10426 return callstack_update_max_depth(ctx, reason);
10427 }
10428
10429 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
10430 {
10431 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
10432
10433 sp->mid = realloc((void *)sp->mid,
10434 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
10435 sp->mid[sp->num_mid] = ctx->bc->cf_last;
10436 sp->num_mid++;
10437 }
10438
10439 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
10440 {
10441 assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
10442 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
10443 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
10444 ctx->bc->fc_sp++;
10445 }
10446
10447 static void fc_poplevel(struct r600_shader_ctx *ctx)
10448 {
10449 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
10450 free(sp->mid);
10451 sp->mid = NULL;
10452 sp->num_mid = 0;
10453 sp->start = NULL;
10454 sp->type = 0;
10455 ctx->bc->fc_sp--;
10456 }
10457
10458 #if 0
10459 static int emit_return(struct r600_shader_ctx *ctx)
10460 {
10461 r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
10462 return 0;
10463 }
10464
10465 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
10466 {
10467
10468 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
10469 ctx->bc->cf_last->pop_count = pops;
10470 /* XXX work out offset */
10471 return 0;
10472 }
10473
10474 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
10475 {
10476 return 0;
10477 }
10478
10479 static void emit_testflag(struct r600_shader_ctx *ctx)
10480 {
10481
10482 }
10483
10484 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
10485 {
10486 emit_testflag(ctx);
10487 emit_jump_to_offset(ctx, 1, 4);
10488 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
10489 pops(ctx, ifidx + 1);
10490 emit_return(ctx);
10491 }
10492
10493 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
10494 {
10495 emit_testflag(ctx);
10496
10497 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10498 ctx->bc->cf_last->pop_count = 1;
10499
10500 fc_set_mid(ctx, fc_sp);
10501
10502 pops(ctx, 1);
10503 }
10504 #endif
10505
10506 static int emit_if(struct r600_shader_ctx *ctx, int opcode,
10507 struct r600_bytecode_alu_src *src)
10508 {
10509 int alu_type = CF_OP_ALU_PUSH_BEFORE;
10510 bool needs_workaround = false;
10511 int elems = callstack_push(ctx, FC_PUSH_VPM);
10512
10513 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1)
10514 needs_workaround = true;
10515
10516 if (ctx->bc->chip_class == EVERGREEN && ctx_needs_stack_workaround_8xx(ctx)) {
10517 unsigned dmod1 = (elems - 1) % ctx->bc->stack.entry_size;
10518 unsigned dmod2 = (elems) % ctx->bc->stack.entry_size;
10519
10520 if (elems && (!dmod1 || !dmod2))
10521 needs_workaround = true;
10522 }
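	/* on r8xx the split also appears to be needed when the pushed element
	 * count lands on (or just past) a stack entry boundary */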
10523
10524 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
10525 * LOOP_STARTxxx for nested loops may put the branch stack into a state
10526 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
10527 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
10528 if (needs_workaround) {
10529 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
10530 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
10531 alu_type = CF_OP_ALU;
10532 }
10533
10534 emit_logic_pred(ctx, opcode, alu_type, src);
10535
10536 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
10537
10538 fc_pushlevel(ctx, FC_IF);
10539
10540 return 0;
10541 }
10542
10543 static int tgsi_if(struct r600_shader_ctx *ctx)
10544 {
10545 struct r600_bytecode_alu_src alu_src;
10546 r600_bytecode_src(&alu_src, &ctx->src[0], 0);
10547
10548 return emit_if(ctx, ALU_OP2_PRED_SETNE, &alu_src);
10549 }
10550
10551 static int tgsi_uif(struct r600_shader_ctx *ctx)
10552 {
10553 struct r600_bytecode_alu_src alu_src;
10554 r600_bytecode_src(&alu_src, &ctx->src[0], 0);
10555 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
10556 }
10557
10558 static int tgsi_else(struct r600_shader_ctx *ctx)
10559 {
10560 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
10561 ctx->bc->cf_last->pop_count = 1;
10562
10563 fc_set_mid(ctx, ctx->bc->fc_sp - 1);
10564 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
10565 return 0;
10566 }
10567
10568 static int tgsi_endif(struct r600_shader_ctx *ctx)
10569 {
10570 int offset = 2;
10571 pops(ctx, 1);
10572 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
10573 R600_ERR("if/endif unbalanced in shader\n");
10574 return -1;
10575 }
10576
10577 /* ALU_EXTENDED needs 4 DWords instead of two, adjust jump target offset accordingly */
10578 if (ctx->bc->cf_last->eg_alu_extended)
10579 offset += 2;
10580
10581 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
10582 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + offset;
10583 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
10584 } else {
10585 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + offset;
10586 }
10587 fc_poplevel(ctx);
10588
10589 callstack_pop(ctx, FC_PUSH_VPM);
10590 return 0;
10591 }
10592
10593 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
10594 {
10595 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
10596 * limited to 4096 iterations, like the other LOOP_* instructions. */
10597 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
10598
10599 fc_pushlevel(ctx, FC_LOOP);
10600
10601 /* check stack depth */
10602 callstack_push(ctx, FC_LOOP);
10603 return 0;
10604 }
10605
10606 static int tgsi_endloop(struct r600_shader_ctx *ctx)
10607 {
10608 int i;
10609
10610 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
10611
10612 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
10613 R600_ERR("loop/endloop in shader code are not paired.\n");
10614 return -EINVAL;
10615 }
10616
10617 /* fixup loop pointers - from r600isa
10618 LOOP END points to CF after LOOP START,
10619 LOOP START point to CF after LOOP END
10620 BRK/CONT point to LOOP END CF
10621 */
10622 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
10623
10624 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
10625
10626 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
10627 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
10628 }
10629 /* XXX add LOOPRET support */
10630 fc_poplevel(ctx);
10631 callstack_pop(ctx, FC_LOOP);
10632 return 0;
10633 }
10634
10635 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
10636 {
10637 unsigned int fscp;
10638
10639 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
10640 {
10641 if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
10642 break;
10643 }
10644
10645 if (fscp == 0) {
10646 R600_ERR("Break not inside loop/endloop pair\n");
10647 return -EINVAL;
10648 }
10649
10650 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10651
10652 fc_set_mid(ctx, fscp - 1);
10653
10654 return 0;
10655 }
10656
10657 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
10658 {
10659 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10660 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
10661 int r;
10662
10663 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
10664 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
10665
10666 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
10667 if (!r) {
10668 ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
10669 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
10670 return emit_inc_ring_offset(ctx, stream, TRUE);
10671 }
10672 return r;
10673 }
10674
10675 static int tgsi_umad(struct r600_shader_ctx *ctx)
10676 {
10677 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10678 struct r600_bytecode_alu alu;
10679 int i, j, r;
10680 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10681
10682 /* src0 * src1 */
10683 for (i = 0; i < lasti + 1; i++) {
10684 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10685 continue;
10686
10687 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10688
10689 alu.dst.chan = i;
10690 alu.dst.sel = ctx->temp_reg;
10691 alu.dst.write = 1;
10692
10693 alu.op = ALU_OP2_MULLO_UINT;
10694 for (j = 0; j < 2; j++) {
10695 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
10696 }
10697
10698 alu.last = 1;
10699 r = emit_mul_int_op(ctx->bc, &alu);
10700 if (r)
10701 return r;
10702 }
10703
10704
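	/* accumulate: dst = temp + src2 */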
10705 for (i = 0; i < lasti + 1; i++) {
10706 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10707 continue;
10708
10709 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10710 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10711
10712 alu.op = ALU_OP2_ADD_INT;
10713
10714 alu.src[0].sel = ctx->temp_reg;
10715 alu.src[0].chan = i;
10716
10717 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
10718 if (i == lasti) {
10719 alu.last = 1;
10720 }
10721 r = r600_bytecode_add_alu(ctx->bc, &alu);
10722 if (r)
10723 return r;
10724 }
10725 return 0;
10726 }
10727
10728 static int tgsi_pk2h(struct r600_shader_ctx *ctx)
10729 {
10730 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10731 struct r600_bytecode_alu alu;
10732 int r, i;
10733 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10734
10735 /* temp.xy = f32_to_f16(src) */
10736 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10737 alu.op = ALU_OP1_FLT32_TO_FLT16;
10738 alu.dst.chan = 0;
10739 alu.dst.sel = ctx->temp_reg;
10740 alu.dst.write = 1;
10741 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10742 r = r600_bytecode_add_alu(ctx->bc, &alu);
10743 if (r)
10744 return r;
10745 alu.dst.chan = 1;
10746 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
10747 alu.last = 1;
10748 r = r600_bytecode_add_alu(ctx->bc, &alu);
10749 if (r)
10750 return r;
10751
10752 /* dst.x = temp.y * 0x10000 + temp.x */
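	/* MULADD_UINT24 is exact here: both packed halves are < 2^16, well
	 * inside the 24-bit multiply range, so the 32-bit result is exact */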
10753 for (i = 0; i < lasti + 1; i++) {
10754 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10755 continue;
10756
10757 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10758 alu.op = ALU_OP3_MULADD_UINT24;
10759 alu.is_op3 = 1;
10760 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10761 alu.last = i == lasti;
10762 alu.src[0].sel = ctx->temp_reg;
10763 alu.src[0].chan = 1;
10764 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10765 alu.src[1].value = 0x10000;
10766 alu.src[2].sel = ctx->temp_reg;
10767 alu.src[2].chan = 0;
10768 r = r600_bytecode_add_alu(ctx->bc, &alu);
10769 if (r)
10770 return r;
10771 }
10772
10773 return 0;
10774 }
10775
10776 static int tgsi_up2h(struct r600_shader_ctx *ctx)
10777 {
10778 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10779 struct r600_bytecode_alu alu;
10780 int r, i;
10781 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10782
10783 /* temp.x = src.x */
10784 /* note: no need to mask out the high bits */
10785 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10786 alu.op = ALU_OP1_MOV;
10787 alu.dst.chan = 0;
10788 alu.dst.sel = ctx->temp_reg;
10789 alu.dst.write = 1;
10790 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10791 r = r600_bytecode_add_alu(ctx->bc, &alu);
10792 if (r)
10793 return r;
10794
10795 /* temp.y = src.x >> 16 */
10796 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10797 alu.op = ALU_OP2_LSHR_INT;
10798 alu.dst.chan = 1;
10799 alu.dst.sel = ctx->temp_reg;
10800 alu.dst.write = 1;
10801 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
10802 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10803 alu.src[1].value = 16;
10804 alu.last = 1;
10805 r = r600_bytecode_add_alu(ctx->bc, &alu);
10806 if (r)
10807 return r;
10808
10809 /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
10810 for (i = 0; i < lasti + 1; i++) {
10811 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
10812 continue;
10813 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10814 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10815 alu.op = ALU_OP1_FLT16_TO_FLT32;
10816 alu.src[0].sel = ctx->temp_reg;
10817 alu.src[0].chan = i % 2;
10818 alu.last = i == lasti;
10819 r = r600_bytecode_add_alu(ctx->bc, &alu);
10820 if (r)
10821 return r;
10822 }
10823
10824 return 0;
10825 }
10826
10827 static int tgsi_bfe(struct r600_shader_ctx *ctx)
10828 {
10829 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10830 struct r600_bytecode_alu alu;
10831 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
10832 int r, i;
10833 int dst = -1;
10834
10835 if ((inst->Src[0].Register.File == inst->Dst[0].Register.File &&
10836 inst->Src[0].Register.Index == inst->Dst[0].Register.Index) ||
10837 (inst->Src[2].Register.File == inst->Dst[0].Register.File &&
10838 inst->Src[2].Register.Index == inst->Dst[0].Register.Index))
10839 dst = r600_get_temp(ctx);
10840
10841 r = tgsi_op3_dst(ctx, dst);
10842 if (r)
10843 return r;
10844
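	/* BFE must return the unmodified source when the width is >= 32;
	 * build a per-channel (width >= 32) mask first and use it to select
	 * between the op3 result and the raw source below */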
10845 for (i = 0; i < lasti + 1; i++) {
10846 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10847 alu.op = ALU_OP2_SETGE_INT;
10848 r600_bytecode_src(&alu.src[0], &ctx->src[2], i);
10849 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
10850 alu.src[1].value = 32;
10851 alu.dst.sel = ctx->temp_reg;
10852 alu.dst.chan = i;
10853 alu.dst.write = 1;
10854 if (i == lasti)
10855 alu.last = 1;
10856 r = r600_bytecode_add_alu(ctx->bc, &alu);
10857 if (r)
10858 return r;
10859 }
10860
10861 for (i = 0; i < lasti + 1; i++) {
10862 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10863 alu.op = ALU_OP3_CNDE_INT;
10864 alu.is_op3 = 1;
10865 alu.src[0].sel = ctx->temp_reg;
10866 alu.src[0].chan = i;
10867
10868 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
10869 if (dst != -1)
10870 alu.src[1].sel = dst;
10871 else
10872 alu.src[1].sel = alu.dst.sel;
10873 alu.src[1].chan = i;
10874 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
10875 alu.dst.write = 1;
10876 if (i == lasti)
10877 alu.last = 1;
10878 r = r600_bytecode_add_alu(ctx->bc, &alu);
10879 if (r)
10880 return r;
10881 }
10882
10883 return 0;
10884 }
10885
10886 static int tgsi_clock(struct r600_shader_ctx *ctx)
10887 {
10888 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10889 struct r600_bytecode_alu alu;
10890 int r;
10891
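	/* read the time counter halves into dst.xy; the two MOVs are separate
	 * reads, so the 64-bit value is not sampled atomically */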
10892 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10893 alu.op = ALU_OP1_MOV;
10894 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
10895 alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_LO;
10896 r = r600_bytecode_add_alu(ctx->bc, &alu);
10897 if (r)
10898 return r;
10899 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10900 alu.op = ALU_OP1_MOV;
10901 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
10902 alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_HI;
10903 alu.last = 1;
10904 r = r600_bytecode_add_alu(ctx->bc, &alu);
10905 if (r)
10906 return r;
10907 return 0;
10908 }
10909
10910 static int emit_u64add(struct r600_shader_ctx *ctx, int op,
10911 int treg,
10912 int src0_sel, int src0_chan,
10913 int src1_sel, int src1_chan)
10914 {
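	/* 64-bit add/sub built from 32-bit halves: treg.x = lo op,
	 * treg.z = the ADDC/SUBB carry (or borrow) of the low halves,
	 * and treg.y = hi op with the carry folded in by the final ALU op */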
10915 struct r600_bytecode_alu alu;
10916 int r;
10917 int opc;
10918
10919 if (op == ALU_OP2_ADD_INT)
10920 opc = ALU_OP2_ADDC_UINT;
10921 else
10922 opc = ALU_OP2_SUBB_UINT;
10923
10924 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10925 alu.op = op;
10926 alu.dst.sel = treg;
10927 alu.dst.chan = 0;
10928 alu.dst.write = 1;
10929 alu.src[0].sel = src0_sel;
10930 alu.src[0].chan = src0_chan + 0;
10931 alu.src[1].sel = src1_sel;
10932 alu.src[1].chan = src1_chan + 0;
10933 alu.src[1].neg = 0;
10934 r = r600_bytecode_add_alu(ctx->bc, &alu);
10935 if (r)
10936 return r;
10937
10938 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10939 alu.op = op;
10940 alu.dst.sel = treg;
10941 alu.dst.chan = 1;
10942 alu.dst.write = 1;
10943 alu.src[0].sel = src0_sel;
10944 alu.src[0].chan = src0_chan + 1;
10945 alu.src[1].sel = src1_sel;
10946 alu.src[1].chan = src1_chan + 1;
10947 alu.src[1].neg = 0;
10948 r = r600_bytecode_add_alu(ctx->bc, &alu);
10949 if (r)
10950 return r;
10951
10952 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10953 alu.op = opc;
10954 alu.dst.sel = treg;
10955 alu.dst.chan = 2;
10956 alu.dst.write = 1;
10957 alu.last = 1;
10958 alu.src[0].sel = src0_sel;
10959 alu.src[0].chan = src0_chan + 0;
10960 alu.src[1].sel = src1_sel;
10961 alu.src[1].chan = src1_chan + 0;
10962 alu.src[1].neg = 0;
10963 r = r600_bytecode_add_alu(ctx->bc, &alu);
10964 if (r)
10965 return r;
10966
10967 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10968 alu.op = op;
10969 alu.dst.sel = treg;
10970 alu.dst.chan = 1;
10971 alu.dst.write = 1;
10972 alu.src[0].sel = treg;
10973 alu.src[0].chan = 1;
10974 alu.src[1].sel = treg;
10975 alu.src[1].chan = 2;
10976 alu.last = 1;
10977 r = r600_bytecode_add_alu(ctx->bc, &alu);
10978 if (r)
10979 return r;
10980 return 0;
10981 }
10982
10983 static int egcm_u64add(struct r600_shader_ctx *ctx)
10984 {
10985 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
10986 struct r600_bytecode_alu alu;
10987 int r;
10988 int treg = ctx->temp_reg;
10989 int op = ALU_OP2_ADD_INT, opc = ALU_OP2_ADDC_UINT;
10990
10991 if (ctx->src[1].neg) {
10992 op = ALU_OP2_SUB_INT;
10993 opc = ALU_OP2_SUBB_UINT;
10994 }
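	/* same scheme as emit_u64add: lo result in treg.x, carry/borrow in
	 * treg.z, hi partial in treg.y, then fold the carry into the hi half */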
10995 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
10996 alu.op = op;
10997 alu.dst.sel = treg;
10998 alu.dst.chan = 0;
10999 alu.dst.write = 1;
11000 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11001 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11002 alu.src[1].neg = 0;
11003 r = r600_bytecode_add_alu(ctx->bc, &alu);
11004 if (r)
11005 return r;
11006
11007 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11008 alu.op = op;
11009 alu.dst.sel = treg;
11010 alu.dst.chan = 1;
11011 alu.dst.write = 1;
11012 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
11013 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
11014 alu.src[1].neg = 0;
11015 r = r600_bytecode_add_alu(ctx->bc, &alu);
11016 if (r)
11017 return r;
11018
11019 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11020 alu.op = opc;
11021 alu.dst.sel = treg;
11022 alu.dst.chan = 2;
11023 alu.dst.write = 1;
11024 alu.last = 1;
11025 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11026 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11027 alu.src[1].neg = 0;
11028 r = r600_bytecode_add_alu(ctx->bc, &alu);
11029 if (r)
11030 return r;
11031
11032 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11033 alu.op = op;
11034 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
11035 alu.src[0].sel = treg;
11036 alu.src[0].chan = 1;
11037 alu.src[1].sel = treg;
11038 alu.src[1].chan = 2;
11039 alu.last = 1;
11040 r = r600_bytecode_add_alu(ctx->bc, &alu);
11041 if (r)
11042 return r;
11043 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11044 alu.op = ALU_OP1_MOV;
11045 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
11046 alu.src[0].sel = treg;
11047 alu.src[0].chan = 0;
11048 alu.last = 1;
11049 r = r600_bytecode_add_alu(ctx->bc, &alu);
11050 if (r)
11051 return r;
11052 return 0;
11053 }
11054
11055 /* result.x = mul_lo a.x, b.x
11056    result.y = mul_hi a.x, b.x
11057    result.y += a.x * b.y + a.y * b.x;
11058 */
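/* Derivation: (a.x + 2^32 a.y) * (b.x + 2^32 b.y)
 *   = a.x*b.x + 2^32 * (a.x*b.y + a.y*b.x) + 2^64 * (a.y*b.y)
 * and the 2^64 term drops out of the low 64 bits of the result, so only
 * the four 32x32 multiplies below are needed. */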
11059 static int egcm_u64mul(struct r600_shader_ctx *ctx)
11060 {
11061 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
11062 struct r600_bytecode_alu alu;
11063 int r;
11064 int treg = ctx->temp_reg;
11065
11066 /* temp.x = mul_lo a.x, b.x */
11067 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11068 alu.op = ALU_OP2_MULLO_UINT;
11069 alu.dst.sel = treg;
11070 alu.dst.chan = 0;
11071 alu.dst.write = 1;
11072 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11073 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11074 r = emit_mul_int_op(ctx->bc, &alu);
11075 if (r)
11076 return r;
11077
11078 /* temp.y = mul_hi a.x, b.x */
11079 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11080 alu.op = ALU_OP2_MULHI_UINT;
11081 alu.dst.sel = treg;
11082 alu.dst.chan = 1;
11083 alu.dst.write = 1;
11084 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11085 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11086 r = emit_mul_int_op(ctx->bc, &alu);
11087 if (r)
11088 return r;
11089
11090 /* temp.z = mul a.x, b.y */
11091 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11092 alu.op = ALU_OP2_MULLO_UINT;
11093 alu.dst.sel = treg;
11094 alu.dst.chan = 2;
11095 alu.dst.write = 1;
11096 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
11097 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
11098 r = emit_mul_int_op(ctx->bc, &alu);
11099 if (r)
11100 return r;
11101
11102 /* temp.w = mul a.y, b.x */
11103 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11104 alu.op = ALU_OP2_MULLO_UINT;
11105 alu.dst.sel = treg;
11106 alu.dst.chan = 3;
11107 alu.dst.write = 1;
11108 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
11109 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
11110 r = emit_mul_int_op(ctx->bc, &alu);
11111 if (r)
11112 return r;
11113
11114 /* temp.z = temp.z + temp.w */
11115 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11116 alu.op = ALU_OP2_ADD_INT;
11117 alu.dst.sel = treg;
11118 alu.dst.chan = 2;
11119 alu.dst.write = 1;
11120 alu.src[0].sel = treg;
11121 alu.src[0].chan = 2;
11122 alu.src[1].sel = treg;
11123 alu.src[1].chan = 3;
11124 alu.last = 1;
11125 r = r600_bytecode_add_alu(ctx->bc, &alu);
11126 if (r)
11127 return r;
11128
11129 /* temp.y = temp.y + temp.z */
11130 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11131 alu.op = ALU_OP2_ADD_INT;
11132 alu.dst.sel = treg;
11133 alu.dst.chan = 1;
11134 alu.dst.write = 1;
11135 alu.src[0].sel = treg;
11136 alu.src[0].chan = 1;
11137 alu.src[1].sel = treg;
11138 alu.src[1].chan = 2;
11139 alu.last = 1;
11140 r = r600_bytecode_add_alu(ctx->bc, &alu);
11141 if (r)
11142 return r;
11143
11144 /* dst.x = temp.x */
11145 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11146 alu.op = ALU_OP1_MOV;
11147 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
11148 alu.src[0].sel = treg;
11149 alu.src[0].chan = 0;
11150 r = r600_bytecode_add_alu(ctx->bc, &alu);
11151 if (r)
11152 return r;
11153
11154 /* dst.y = temp.y */
11155 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
11156 alu.op = ALU_OP1_MOV;
11157 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
11158 alu.src[0].sel = treg;
11159 alu.src[0].chan = 1;
11160 alu.last = 1;
11161 r = r600_bytecode_add_alu(ctx->bc, &alu);
11162 if (r)
11163 return r;
11164
11165 return 0;
11166 }
11167
11168 static int emit_u64sge(struct r600_shader_ctx *ctx,
11169 int treg,
11170 int src0_sel, int src0_base_chan,
11171 int src1_sel, int src1_base_chan)
11172 {
11173 int r;
11174 /* for 64-bit sge */
11175 /* result = (src0.y > src1.y) || ((src0.y == src1.y) && (src0.x >= src1.x)) */
11176 r = single_alu_op2(ctx, ALU_OP2_SETGT_UINT,
11177 treg, 1,
11178 src0_sel, src0_base_chan + 1,
11179 src1_sel, src1_base_chan + 1);
11180 if (r)
11181 return r;
11182
11183 r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
11184 treg, 0,
11185 src0_sel, src0_base_chan,
11186 src1_sel, src1_base_chan);
11187 if (r)
11188 return r;
11189
11190 r = single_alu_op2(ctx, ALU_OP2_SETE_INT,
11191 treg, 2,
11192 src0_sel, src0_base_chan + 1,
11193 src1_sel, src1_base_chan + 1);
11194 if (r)
11195 return r;
11196
11197 r = single_alu_op2(ctx, ALU_OP2_AND_INT,
11198 treg, 0,
11199 treg, 0,
11200 treg, 2);
11201 if (r)
11202 return r;
11203
11204 r = single_alu_op2(ctx, ALU_OP2_OR_INT,
11205 treg, 0,
11206 treg, 0,
11207 treg, 1);
11208 if (r)
11209 return r;
11210 return 0;
11211 }
11212
11213 /* this isn't a complete div it's just enough for qbo shader to work */
11214 static int egcm_u64div(struct r600_shader_ctx *ctx)
11215 {
11216 struct r600_bytecode_alu alu;
11217 struct r600_bytecode_alu_src alu_num_hi, alu_num_lo, alu_denom_hi, alu_denom_lo, alu_src;
11218 int r, i;
11219 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
11220
11221 /* make sure we are dividing by a constant with 0 in the high bits */
11222 if (ctx->src[1].sel != V_SQ_ALU_SRC_LITERAL)
11223 return -1;
11224 if (ctx->src[1].value[ctx->src[1].swizzle[1]] != 0)
11225 return -1;
11226 /* make sure we are doing one division */
11227 if (inst->Dst[0].Register.WriteMask != 0x3)
11228 return -1;
11229
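	/* restoring shift-subtract division, fully unrolled: the first loop
	 * reduces the high numerator word against denom_lo << i to build
	 * quotient bits 63..32 in tmp_num.w, the second loop does 64-bit
	 * compare/subtract against denom << i to build bits 31..0 in
	 * tmp_num.z */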
11230 /* emit_if uses ctx->temp_reg, so we can't use it here */
11231 int treg = r600_get_temp(ctx);
11232 int tmp_num = r600_get_temp(ctx);
11233 int sub_tmp = r600_get_temp(ctx);
11234
11235 /* the quotient is accumulated in tmp_num.zw (w = bits 63..32, z = bits 31..0) */
11236 r600_bytecode_src(&alu_num_lo, &ctx->src[0], 0);
11237 r600_bytecode_src(&alu_num_hi, &ctx->src[0], 1);
11238 r600_bytecode_src(&alu_denom_lo, &ctx->src[1], 0);
11239 r600_bytecode_src(&alu_denom_hi, &ctx->src[1], 1);
11240
11241 /* MOV tmp_num.xy, numerator */
11242 r = single_alu_op2(ctx, ALU_OP1_MOV,
11243 tmp_num, 0,
11244 alu_num_lo.sel, alu_num_lo.chan,
11245 0, 0);
11246 if (r)
11247 return r;
11248 r = single_alu_op2(ctx, ALU_OP1_MOV,
11249 tmp_num, 1,
11250 alu_num_hi.sel, alu_num_hi.chan,
11251 0, 0);
11252 if (r)
11253 return r;
11254
11255 r = single_alu_op2(ctx, ALU_OP1_MOV,
11256 tmp_num, 2,
11257 V_SQ_ALU_SRC_LITERAL, 0,
11258 0, 0);
11259 if (r)
11260 return r;
11261
11262 r = single_alu_op2(ctx, ALU_OP1_MOV,
11263 tmp_num, 3,
11264 V_SQ_ALU_SRC_LITERAL, 0,
11265 0, 0);
11266 if (r)
11267 return r;
11268
11269 /* treg 0 is log2_denom */
11270 /* normally this gets the MSB for the denom high value
11271 - however we know this will always be 0 here. */
11272 r = single_alu_op2(ctx,
11273 ALU_OP1_MOV,
11274 treg, 0,
11275 V_SQ_ALU_SRC_LITERAL, 32,
11276 0, 0);
11277 if (r)
11278 return r;
11279
11280 /* normally we would check denom hi for 0, but we know it already is */
11281 /* t0.y = num_hi >= denom_lo */
11282 r = single_alu_op2(ctx,
11283 ALU_OP2_SETGE_UINT,
11284 treg, 1,
11285 alu_num_hi.sel, alu_num_hi.chan,
11286 V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
11287 if (r)
11288 return r;
11289
11290 memset(&alu_src, 0, sizeof(alu_src));
11291 alu_src.sel = treg;
11292 alu_src.chan = 1;
11293 r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
11294 if (r)
11295 return r;
11296
11297 /* the division loops are unrolled below */
11298 /* get the MSB first: t0.x = msb(src[1].x), computed on the CPU since the denominator is a literal */
11299 int msb_lo = util_last_bit(alu_denom_lo.value);
11300 r = single_alu_op2(ctx, ALU_OP1_MOV,
11301 treg, 0,
11302 V_SQ_ALU_SRC_LITERAL, msb_lo,
11303 0, 0);
11304 if (r)
11305 return r;
11306
11307 /* unroll the asm here */
11308 for (i = 0; i < 31; i++) {
11309 r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
11310 treg, 2,
11311 V_SQ_ALU_SRC_LITERAL, i,
11312 treg, 0);
11313 if (r)
11314 return r;
11315
11316 /* we can do this on the CPU */
11317 uint32_t denom_lo_shl = alu_denom_lo.value << (31 - i);
11318 /* t0.y = tmp_num.y >= denom_lo << (31 - i) */
11319 r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
11320 treg, 1,
11321 tmp_num, 1,
11322 V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
11323 if (r)
11324 return r;
11325
11326 r = single_alu_op2(ctx, ALU_OP2_AND_INT,
11327 treg, 1,
11328 treg, 1,
11329 treg, 2);
11330 if (r)
11331 return r;
11332
11333 memset(&alu_src, 0, sizeof(alu_src));
11334 alu_src.sel = treg;
11335 alu_src.chan = 1;
11336 r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
11337 if (r)
11338 return r;
11339
11340 r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
11341 tmp_num, 1,
11342 tmp_num, 1,
11343 V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
11344 if (r)
11345 return r;
11346
11347 r = single_alu_op2(ctx, ALU_OP2_OR_INT,
11348 tmp_num, 3,
11349 tmp_num, 3,
11350 V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
11351 if (r)
11352 return r;
11353
11354 r = tgsi_endif(ctx);
11355 if (r)
11356 return r;
11357 }
11358
	/* log2_denom is always <= 31, so manually peel the last loop
	 * iteration.
	 */
	r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
			   treg, 1,
			   tmp_num, 1,
			   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
	if (r)
		return r;

	memset(&alu_src, 0, sizeof(alu_src));
	alu_src.sel = treg;
	alu_src.chan = 1;
	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
			   tmp_num, 1,
			   tmp_num, 1,
			   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
			   tmp_num, 3,
			   tmp_num, 3,
			   V_SQ_ALU_SRC_LITERAL, 1U);
	if (r)
		return r;
	r = tgsi_endif(ctx);
	if (r)
		return r;

	r = tgsi_endif(ctx);
	if (r)
		return r;

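	/* Second phase: build the low quotient word in tmp_num.z. The trial
	 * subtrahend (denom << (31 - i)) is now a 64-bit value, so the
	 * compare and subtract go through the u64 helpers. */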
	/* on to the second loop to unroll */
	for (i = 0; i < 31; i++) {
		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
				   treg, 1,
				   V_SQ_ALU_SRC_LITERAL, (63 - (31 - i)),
				   treg, 0);
		if (r)
			return r;

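		/* the shifted 64-bit denominator is precomputed on the CPU
		 * and fed to the shader as two 32-bit literals */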
		uint64_t denom_shl = (uint64_t)alu_denom_lo.value << (31 - i);
		r = single_alu_op2(ctx, ALU_OP1_MOV,
				   treg, 2,
				   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
				   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
				   treg, 3,
				   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
				   0, 0);
		if (r)
			return r;

		r = emit_u64sge(ctx, sub_tmp,
				tmp_num, 0,
				treg, 2);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_AND_INT,
				   treg, 1,
				   treg, 1,
				   sub_tmp, 0);
		if (r)
			return r;

		memset(&alu_src, 0, sizeof(alu_src));
		alu_src.sel = treg;
		alu_src.chan = 1;
		r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
		if (r)
			return r;

		r = emit_u64add(ctx, ALU_OP2_SUB_INT,
				sub_tmp,
				tmp_num, 0,
				treg, 2);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
				   tmp_num, 0,
				   sub_tmp, 0,
				   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP1_MOV,
				   tmp_num, 1,
				   sub_tmp, 1,
				   0, 0);
		if (r)
			return r;

		r = single_alu_op2(ctx, ALU_OP2_OR_INT,
				   tmp_num, 2,
				   tmp_num, 2,
				   V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
		if (r)
			return r;

		r = tgsi_endif(ctx);
		if (r)
			return r;
	}

	/* log2_denom is always <= 63, so manually peel the last loop
	 * iteration.
	 */
	uint64_t denom_shl = (uint64_t)alu_denom_lo.value;
	r = single_alu_op2(ctx, ALU_OP1_MOV,
			   treg, 2,
			   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
			   0, 0);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP1_MOV,
			   treg, 3,
			   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
			   0, 0);
	if (r)
		return r;

	r = emit_u64sge(ctx, sub_tmp,
			tmp_num, 0,
			treg, 2);
	if (r)
		return r;

	memset(&alu_src, 0, sizeof(alu_src));
	alu_src.sel = sub_tmp;
	alu_src.chan = 0;
	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
	if (r)
		return r;

	r = emit_u64add(ctx, ALU_OP2_SUB_INT,
			sub_tmp,
			tmp_num, 0,
			treg, 2);
	if (r)
		return r;

	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
			   tmp_num, 2,
			   tmp_num, 2,
			   V_SQ_ALU_SRC_LITERAL, 1U);
	if (r)
		return r;
	r = tgsi_endif(ctx);
	if (r)
		return r;

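	/* write the quotient back: dst.x = tmp_num.z (low word),
	 * dst.y = tmp_num.w (high word) */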
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.src[0].sel = tmp_num;
	alu.src[0].chan = 2;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	alu.src[0].sel = tmp_num;
	alu.src[0].chan = 3;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

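/* 64-bit unsigned "set on not equal": compare the low and high words
 * independently, then OR the two per-word results into dst.x. */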
static int egcm_u64sne(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r;
	int treg = ctx->temp_reg;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_SETNE_INT;
	alu.dst.sel = treg;
	alu.dst.chan = 0;
	alu.dst.write = 1;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_SETNE_INT;
	alu.dst.sel = treg;
	alu.dst.chan = 1;
	alu.dst.write = 1;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
	r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_OR_INT;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.src[0].sel = treg;
	alu.src[0].chan = 0;
	alu.src[1].sel = treg;
	alu.src[1].chan = 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

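/* TGSI opcode dispatch tables. Each entry pairs a hardware opcode
 * (ALU/CF/FETCH) with the emit callback for that TGSI instruction;
 * unhandled opcodes fall through to tgsi_unsupported. There is one
 * table per GPU generation: r600, evergreen (eg) and cayman (cm). */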
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},

	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},

	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	/* MIN_DX10 returns a non-NaN result if one src is NaN, MIN returns NaN */
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_unsupported},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[81] = { ALU_OP0_NOP, tgsi_unsupported},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

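/* Evergreen: relative to the r600 table above, this adds native FMA,
 * double-precision ops, 64-bit integer ops, image load/store and RAT
 * atomics, plus fine derivatives and half packing/unpacking. */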
static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

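/* Cayman: no scalar (t) slot, so transcendentals and multiply-high ops
 * go through the cayman_* helpers, which replicate the instruction
 * across the vector lanes instead of using a trans-slot emit. */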
static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};