/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_sq.h"
#include "r600_formats.h"
#include "r600_opcodes.h"
#include "r600_shader.h"
#include "r600d.h"

#include "sb/sb_public.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"
#include "util/u_bitcast.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include <stdio.h>
#include <errno.h>

/* CAYMAN notes
Why CAYMAN got loops for lots of instructions is explained here.

-These 8xx t-slot only ops are implemented in all vector slots.
MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
These 8xx t-slot only opcodes become vector ops, with all four
slots expecting the arguments on sources a and b. Result is
broadcast to all channels.
MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
These 8xx t-slot only opcodes become vector ops in the z, y, and
x slots.
EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
SQRT_IEEE/_64
SIN/COS
The w slot may have an independent co-issued operation, or if the
result is required to be in the w slot, the opcode above may be
issued in the w slot as well.
The compiler must issue the source argument to slots z, y, and x
*/

/* Contents of r0 on entry to various shaders

 VS - .x = VertexID
      .y = RelVertexID (??)
      .w = InstanceID

 GS - r0.xyw, r1.xyz = per-vertex offsets
      r0.z = PrimitiveID

 TCS - .x = PatchID
       .y = RelPatchID (??)
       .z = InvocationID
       .w = tess factor base.

 TES - .x = TessCoord.x
     - .y = TessCoord.y
     - .z = RelPatchID (??)
     - .w = PrimitiveID

 PS - face_gpr.z = SampleMask
      face_gpr.w = SampleID
*/
#define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
static int r600_shader_from_tgsi(struct r600_context *rctx,
				 struct r600_pipe_shader *pipeshader,
				 union r600_shader_key key);

static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
			       int size, unsigned comp_mask)
{
	if (!size)
		return;

	if (ps->num_arrays == ps->max_arrays) {
		ps->max_arrays += 64;
		ps->arrays = realloc(ps->arrays, ps->max_arrays *
				     sizeof(struct r600_shader_array));
	}

	int n = ps->num_arrays;
	++ps->num_arrays;

	ps->arrays[n].comp_mask = comp_mask;
	ps->arrays[n].gpr_start = start_gpr;
	ps->arrays[n].gpr_count = size;
}

static void r600_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	fprintf(stderr, "STREAMOUT\n");
	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, "  %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
			i,
			so->output[i].stream,
			so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "",
			so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
	}
}

static int store_shader(struct pipe_context *ctx,
			struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	uint32_t *ptr, i;

	if (shader->bo == NULL) {
		shader->bo = (struct r600_resource*)
			pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
		if (shader->bo == NULL) {
			return -ENOMEM;
		}
		ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
		if (R600_BIG_ENDIAN) {
			for (i = 0; i < shader->shader.bc.ndw; ++i) {
				ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
			}
		} else {
			memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
		}
		rctx->b.ws->buffer_unmap(shader->bo->buf);
	}

	return 0;
}

int r600_pipe_shader_create(struct pipe_context *ctx,
			    struct r600_pipe_shader *shader,
			    union r600_shader_key key)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = shader->selector;
	int r;
	bool dump = r600_can_dump_shader(&rctx->screen->b,
					 tgsi_get_processor_type(sel->tokens));
	unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
	unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
	unsigned export_shader;

	shader->shader.bc.isa = rctx->isa;

	if (dump) {
		fprintf(stderr, "--------------------------------------------------------------\n");
		tgsi_dump(sel->tokens, 0);

		if (sel->so.num_outputs) {
			r600_dump_streamout(&sel->so);
		}
	}
	r = r600_shader_from_tgsi(rctx, shader, key);
	if (r) {
		R600_ERR("translation from TGSI failed !\n");
		goto error;
	}
	if (shader->shader.processor_type == PIPE_SHADER_VERTEX) {
		/* only disable for vertex shaders in tess paths */
		if (key.vs.as_ls)
			use_sb = 0;
	}
	use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
	use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);

	/* disable SB for shaders using doubles */
	use_sb &= !shader->shader.uses_doubles;

	use_sb &= !shader->shader.uses_atomics;
	use_sb &= !shader->shader.uses_images;

	/* Check if the bytecode has already been built. */
	if (!shader->shader.bc.bytecode) {
		r = r600_bytecode_build(&shader->shader.bc);
		if (r) {
			R600_ERR("building bytecode failed !\n");
			goto error;
		}
	}

	if (dump && !sb_disasm) {
		fprintf(stderr, "--------------------------------------------------------------\n");
		r600_bytecode_disasm(&shader->shader.bc);
		fprintf(stderr, "______________________________________________________________\n");
	} else if ((dump && sb_disasm) || use_sb) {
		r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
					     dump, use_sb);
		if (r) {
			R600_ERR("r600_sb_bytecode_process failed !\n");
			goto error;
		}
	}

	if (shader->gs_copy_shader) {
		if (dump) {
			// dump copy shader
			r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
						     &shader->gs_copy_shader->shader, dump, 0);
			if (r)
				goto error;
		}

		if ((r = store_shader(ctx, shader->gs_copy_shader)))
			goto error;
	}

	/* Store the shader in a buffer. */
	if ((r = store_shader(ctx, shader)))
		goto error;

	/* Build state. */
	switch (shader->shader.processor_type) {
	case PIPE_SHADER_TESS_CTRL:
		evergreen_update_hs_state(ctx, shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (key.tes.as_es)
			evergreen_update_es_state(ctx, shader);
		else
			evergreen_update_vs_state(ctx, shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_gs_state(ctx, shader);
			evergreen_update_vs_state(ctx, shader->gs_copy_shader);
		} else {
			r600_update_gs_state(ctx, shader);
			r600_update_vs_state(ctx, shader->gs_copy_shader);
		}
		break;
	case PIPE_SHADER_VERTEX:
		export_shader = key.vs.as_es;
		if (rctx->b.chip_class >= EVERGREEN) {
			if (key.vs.as_ls)
				evergreen_update_ls_state(ctx, shader);
			else if (key.vs.as_es)
				evergreen_update_es_state(ctx, shader);
			else
				evergreen_update_vs_state(ctx, shader);
		} else {
			if (export_shader)
				r600_update_es_state(ctx, shader);
			else
				r600_update_vs_state(ctx, shader);
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_ps_state(ctx, shader);
		} else {
			r600_update_ps_state(ctx, shader);
		}
		break;
	default:
		r = -EINVAL;
		goto error;
	}
	return 0;

error:
	r600_pipe_shader_destroy(ctx, shader);
	return r;
}

void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	r600_resource_reference(&shader->bo, NULL);
	r600_bytecode_clear(&shader->shader.bc);
	r600_release_command_buffer(&shader->command_buffer);
}

/*
 * tgsi -> r600 shader
 */
struct r600_shader_tgsi_instruction;

struct r600_shader_src {
	unsigned sel;
	unsigned swizzle[4];
	unsigned neg;
	unsigned abs;
	unsigned rel;
	unsigned kc_bank;
	boolean kc_rel; /* true if cache bank is indexed */
	uint32_t value[4];
};

struct eg_interp {
	boolean enabled;
	unsigned ij_index;
};

struct r600_shader_ctx {
	struct tgsi_shader_info info;
	struct tgsi_parse_context parse;
	const struct tgsi_token *tokens;
	unsigned type;
	unsigned file_offset[TGSI_FILE_COUNT];
	unsigned temp_reg;
	const struct r600_shader_tgsi_instruction *inst_info;
	struct r600_bytecode *bc;
	struct r600_shader *shader;
	struct r600_shader_src src[4];
	uint32_t *literals;
	uint32_t nliterals;
	uint32_t max_driver_temp_used;
	/* needed for evergreen interpolation */
	struct eg_interp eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
	/* evergreen/cayman also store sample mask in face register */
	int face_gpr;
	/* sample id is .w component stored in fixed point position register */
	int fixed_pt_position_gpr;
	int colors_used;
	boolean clip_vertex_write;
	unsigned cv_output;
	unsigned edgeflag_output;
	int fragcoord_input;
	int native_integers;
	int next_ring_offset;
	int gs_out_ring_offset;
	int gs_next_vertex;
	struct r600_shader *gs_for_vs;
	int gs_export_gpr_tregs[4];
	int gs_rotated_input[2];
	const struct pipe_stream_output_info *gs_stream_output_info;
	unsigned enabled_stream_buffers_mask;
	unsigned tess_input_info; /* temp with tess input offsets */
	unsigned tess_output_info; /* temp with tess output offsets */
	unsigned thread_id_gpr; /* temp with thread id calculated for images */
};

struct r600_shader_tgsi_instruction {
	unsigned op;
	int (*process)(struct r600_shader_ctx *ctx);
};

static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
static int tgsi_else(struct r600_shader_ctx *ctx);
static int tgsi_endif(struct r600_shader_ctx *ctx);
static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
static int tgsi_endloop(struct r600_shader_ctx *ctx);
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
		unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
		unsigned int dst_reg);
static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
			const struct r600_shader_src *shader_src,
			unsigned chan);
static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
			       unsigned dst_reg);

static int tgsi_last_instruction(unsigned writemask)
{
	int i, lasti = 0;

	for (i = 0; i < 4; i++) {
		if (writemask & (1 << i)) {
			lasti = i;
		}
	}
	return lasti;
}
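/* Illustrative example: writemask 0x5 (.xz) yields lasti = 2, so the
 * per-channel emit loops below stop after channel z. */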

static int tgsi_is_supported(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
	unsigned j;

	if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
		R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
		return -EINVAL;
	}
#if 0
	if (i->Instruction.Label) {
		R600_ERR("label unsupported\n");
		return -EINVAL;
	}
#endif
	for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
		if (i->Src[j].Register.Dimension) {
			switch (i->Src[j].Register.File) {
			case TGSI_FILE_CONSTANT:
			case TGSI_FILE_HW_ATOMIC:
				break;
			case TGSI_FILE_INPUT:
				if (ctx->type == PIPE_SHADER_GEOMETRY ||
				    ctx->type == PIPE_SHADER_TESS_CTRL ||
				    ctx->type == PIPE_SHADER_TESS_EVAL)
					break;
				/* fall through */
			case TGSI_FILE_OUTPUT:
				if (ctx->type == PIPE_SHADER_TESS_CTRL)
					break;
				/* fall through */
			default:
				R600_ERR("unsupported src %d (file %d, dimension %d)\n", j,
					 i->Src[j].Register.File,
					 i->Src[j].Register.Dimension);
				return -EINVAL;
			}
		}
	}
	for (j = 0; j < i->Instruction.NumDstRegs; j++) {
		if (i->Dst[j].Register.Dimension) {
			if (ctx->type == PIPE_SHADER_TESS_CTRL)
				continue;
			R600_ERR("unsupported dst (dimension)\n");
			return -EINVAL;
		}
	}
	return 0;
}

int eg_get_interpolator_index(unsigned interpolate, unsigned location)
{
	if (interpolate == TGSI_INTERPOLATE_COLOR ||
	    interpolate == TGSI_INTERPOLATE_LINEAR ||
	    interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
	{
		int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
		int loc;

		switch(location) {
		case TGSI_INTERPOLATE_LOC_CENTER:
			loc = 1;
			break;
		case TGSI_INTERPOLATE_LOC_CENTROID:
			loc = 2;
			break;
		case TGSI_INTERPOLATE_LOC_SAMPLE:
		default:
			loc = 0;
			break;
		}

		return is_linear * 3 + loc;
	}

	return -1;
}
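/* The resulting indices, spelled out (this follows directly from the code
 * above; COLOR is grouped with PERSPECTIVE):
 *   PERSPECTIVE/SAMPLE   -> 0    LINEAR/SAMPLE   -> 3
 *   PERSPECTIVE/CENTER   -> 1    LINEAR/CENTER   -> 4
 *   PERSPECTIVE/CENTROID -> 2    LINEAR/CENTROID -> 5 */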

static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
					     int input)
{
	int i = eg_get_interpolator_index(
		ctx->shader->input[input].interpolate,
		ctx->shader->input[input].interpolate_location);
	assert(i >= 0);
	ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
}

static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
{
	int i, r;
	struct r600_bytecode_alu alu;
	int gpr = 0, base_chan = 0;
	int ij_index = ctx->shader->input[input].ij_index;

	/* work out gpr and base_chan from index */
	gpr = ij_index / 2;
	base_chan = (2 * (ij_index % 2)) + 1;
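	/* e.g. ij_index 0 -> gpr 0, base_chan 1 (i/j pair in R0.yx);
	 *      ij_index 1 -> gpr 0, base_chan 3 (pair in R0.wz);
	 *      ij_index 2 -> gpr 1, base_chan 1; and so on. */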

	for (i = 0; i < 8; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		if (i < 4)
			alu.op = ALU_OP2_INTERP_ZW;
		else
			alu.op = ALU_OP2_INTERP_XY;

		if ((i > 1) && (i < 6)) {
			alu.dst.sel = ctx->shader->input[input].gpr;
			alu.dst.write = 1;
		}

		alu.dst.chan = i % 4;

		alu.src[0].sel = gpr;
		alu.src[0].chan = (base_chan - (i % 2));

		alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;

		alu.bank_swizzle_force = SQ_ALU_VEC_210;
		if ((i % 4) == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
{
	int i, r;
	struct r600_bytecode_alu alu;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_INTERP_LOAD_P0;

		alu.dst.sel = ctx->shader->input[input].gpr;
		alu.dst.write = 1;

		alu.dst.chan = i;

		alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
		alu.src[0].chan = i;

		if (i == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

/*
 * Special export handling in shaders
 *
 * shader export ARRAY_BASE for EXPORT_POS:
 * 60 is position
 * 61 is misc vector
 * 62, 63 are clip distance vectors
 *
 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
 * exclusive from render target index)
 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
 *
 *
 * shader export ARRAY_BASE for EXPORT_PIXEL:
 * 0-7 CB targets
 * 61 computed Z vector
 *
 * The use of the values exported in the computed Z vector are controlled
 * by DB_SHADER_CONTROL:
 * Z_EXPORT_ENABLE - Z as a float in RED
 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
 * DB_SOURCE_FORMAT - export control restrictions
 *
 */


/* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
static int r600_spi_sid(struct r600_shader_io * io)
{
	int index, name = io->name;

	/* These params are handled differently, they don't need
	 * semantic indices, so we'll use 0 for them.
	 */
	if (name == TGSI_SEMANTIC_POSITION ||
	    name == TGSI_SEMANTIC_PSIZE ||
	    name == TGSI_SEMANTIC_EDGEFLAG ||
	    name == TGSI_SEMANTIC_FACE ||
	    name == TGSI_SEMANTIC_SAMPLEMASK)
		index = 0;
	else {
		if (name == TGSI_SEMANTIC_GENERIC) {
			/* For generic params simply use sid from tgsi */
			index = io->sid;
		} else {
			/* For non-generic params - pack name and sid into 8 bits */
			index = 0x80 | (name << 3) | (io->sid);
		}

		/* Make sure that all really used indices have nonzero value, so
		 * we can just compare it to 0 later instead of comparing the name
		 * with different values to detect special cases. */
		index++;
	}

	return index;
}
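/* Illustrative: a GENERIC param with sid 5 yields index 6 (sid + 1), while
 * e.g. a FOG param packs to (0x80 | (TGSI_SEMANTIC_FOG << 3) | sid) + 1;
 * either way, every semantic the SPI really uses ends up nonzero. */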

/* we need this to get a common lds index for vs/tcs/tes input/outputs */
int r600_get_lds_unique_index(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_PSIZE:
		return 1;
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index <= 1);
		return 2 + index;
	case TGSI_SEMANTIC_GENERIC:
		if (index <= 63-4)
			return 4 + index;
		else
			/* same explanation as in the default statement,
			 * the only user hitting this is st/nine.
			 */
			return 0;

	/* patch indices are completely separate and thus start from 0 */
	case TGSI_SEMANTIC_TESSOUTER:
		return 0;
	case TGSI_SEMANTIC_TESSINNER:
		return 1;
	case TGSI_SEMANTIC_PATCH:
		return 2 + index;

	default:
		/* Don't fail here. The result of this function is only used
		 * for LS, TCS, TES, and GS, where legacy GL semantics can't
		 * occur, but this function is called for all vertex shaders
		 * before it's known whether LS will be compiled or not.
		 */
		return 0;
	}
}
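/* Resulting per-vertex layout, for reference: POSITION -> 0, PSIZE -> 1,
 * CLIPDIST[0..1] -> 2..3, GENERIC[n] -> 4 + n (up to slot 63); per-patch:
 * TESSOUTER -> 0, TESSINNER -> 1, PATCH[n] -> 2 + n. */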

/* turn input into interpolate on EG */
static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
{
	int r = 0;

	if (ctx->shader->input[index].spi_sid) {
		ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
		if (ctx->shader->input[index].interpolate > 0) {
			evergreen_interp_assign_ij_index(ctx, index);
			r = evergreen_interp_alu(ctx, index);
		} else {
			r = evergreen_interp_flat(ctx, index);
		}
	}
	return r;
}

static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
{
	struct r600_bytecode_alu alu;
	int i, r;
	int gpr_front = ctx->shader->input[front].gpr;
	int gpr_back = ctx->shader->input[back].gpr;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP3_CNDGT;
		alu.is_op3 = 1;
		alu.dst.write = 1;
		alu.dst.sel = gpr_front;
		alu.src[0].sel = ctx->face_gpr;
		alu.src[1].sel = gpr_front;
		alu.src[2].sel = gpr_back;

		alu.dst.chan = i;
		alu.src[1].chan = i;
		alu.src[2].chan = i;
		alu.last = (i==3);

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;
	}

	return 0;
}
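/* CNDGT selects per channel: dst = face > 0 ? front : back, so after this
 * the front-color GPR holds the color for whichever side the pixel faces. */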

/* execute a single slot ALU calculation */
static int single_alu_op2(struct r600_shader_ctx *ctx, int op,
			  int dst_sel, int dst_chan,
			  int src0_sel, unsigned src0_chan_val,
			  int src1_sel, unsigned src1_chan_val)
{
	struct r600_bytecode_alu alu;
	int r, i;

	if (ctx->bc->chip_class == CAYMAN && op == ALU_OP2_MULLO_INT) {
		for (i = 0; i < 4; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = op;
			alu.src[0].sel = src0_sel;
			if (src0_sel == V_SQ_ALU_SRC_LITERAL)
				alu.src[0].value = src0_chan_val;
			else
				alu.src[0].chan = src0_chan_val;
			alu.src[1].sel = src1_sel;
			if (src1_sel == V_SQ_ALU_SRC_LITERAL)
				alu.src[1].value = src1_chan_val;
			else
				alu.src[1].chan = src1_chan_val;
			alu.dst.sel = dst_sel;
			alu.dst.chan = i;
			alu.dst.write = i == dst_chan;
			alu.last = (i == 3);
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		return 0;
	}

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = op;
	alu.src[0].sel = src0_sel;
	if (src0_sel == V_SQ_ALU_SRC_LITERAL)
		alu.src[0].value = src0_chan_val;
	else
		alu.src[0].chan = src0_chan_val;
	alu.src[1].sel = src1_sel;
	if (src1_sel == V_SQ_ALU_SRC_LITERAL)
		alu.src[1].value = src1_chan_val;
	else
		alu.src[1].chan = src1_chan_val;
	alu.dst.sel = dst_sel;
	alu.dst.chan = dst_chan;
	alu.dst.write = 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}
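/* Typical use elsewhere in this file (a sketch):
 *   single_alu_op2(ctx, ALU_OP2_ADD_INT, t, 0, t, 0, V_SQ_ALU_SRC_LITERAL, 16);
 * adds the literal 16 to t.x in place. On CAYMAN the MULLO_INT path above
 * issues the multiply in all four slots but only writes dst_chan. */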

/* execute a single slot ALU calculation */
static int single_alu_op3(struct r600_shader_ctx *ctx, int op,
			  int dst_sel, int dst_chan,
			  int src0_sel, unsigned src0_chan_val,
			  int src1_sel, unsigned src1_chan_val,
			  int src2_sel, unsigned src2_chan_val)
{
	struct r600_bytecode_alu alu;
	int r;

	/* validate this for other ops */
	assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT);
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = op;
	alu.src[0].sel = src0_sel;
	if (src0_sel == V_SQ_ALU_SRC_LITERAL)
		alu.src[0].value = src0_chan_val;
	else
		alu.src[0].chan = src0_chan_val;
	alu.src[1].sel = src1_sel;
	if (src1_sel == V_SQ_ALU_SRC_LITERAL)
		alu.src[1].value = src1_chan_val;
	else
		alu.src[1].chan = src1_chan_val;
	alu.src[2].sel = src2_sel;
	if (src2_sel == V_SQ_ALU_SRC_LITERAL)
		alu.src[2].value = src2_chan_val;
	else
		alu.src[2].chan = src2_chan_val;
	alu.dst.sel = dst_sel;
	alu.dst.chan = dst_chan;
	alu.is_op3 = 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

/* put it in temp_reg.x */
static int get_lds_offset0(struct r600_shader_ctx *ctx,
			   int rel_patch_chan,
			   int temp_reg, bool is_patch_var)
{
	int r;

	/* MUL temp.x, patch_stride (input_vals.x), rel_patch_id (r0.y (tcs)) */
	/* ADD
	   Dimension - patch0_offset (input_vals.z),
	   Non-dim - patch0_data_offset (input_vals.w)
	*/
	r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
			   temp_reg, 0,
			   ctx->tess_output_info, 0,
			   0, rel_patch_chan,
			   ctx->tess_output_info, is_patch_var ? 3 : 2);
	if (r)
		return r;
	return 0;
}
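/* In effect: temp_reg.x = patch_stride * rel_patch_id
 *                       + (is_patch_var ? patch0_data_offset : patch0_offset),
 * matching the input_vals layout described in the comment above. */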

static inline int get_address_file_reg(struct r600_shader_ctx *ctx, int index)
{
	return index > 0 ? ctx->bc->index_reg[index - 1] : ctx->bc->ar_reg;
}

static int r600_get_temp(struct r600_shader_ctx *ctx)
{
	return ctx->temp_reg + ctx->max_driver_temp_used++;
}

static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
{
	int i;
	i = ctx->shader->noutput++;
	ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
	ctx->shader->output[i].sid = 0;
	ctx->shader->output[i].gpr = 0;
	ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
	ctx->shader->output[i].write_mask = 0x4;
	ctx->shader->output[i].spi_sid = prim_id_sid;

	return 0;
}

static int tgsi_barrier(struct r600_shader_ctx *ctx)
{
	struct r600_bytecode_alu alu;
	int r;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ctx->inst_info->op;
	alu.last = 1;

	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}

static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	int r, i, j, count = d->Range.Last - d->Range.First + 1;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->ninput + j;
			assert(i < ARRAY_SIZE(ctx->shader->input));
			ctx->shader->input[i].name = d->Semantic.Name;
			ctx->shader->input[i].sid = d->Semantic.Index + j;
			ctx->shader->input[i].interpolate = d->Interp.Interpolate;
			ctx->shader->input[i].interpolate_location = d->Interp.Location;
			ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
			if (ctx->type == PIPE_SHADER_FRAGMENT) {
				ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
				switch (ctx->shader->input[i].name) {
				case TGSI_SEMANTIC_FACE:
					if (ctx->face_gpr != -1)
						ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
					else
						ctx->face_gpr = ctx->shader->input[i].gpr;
					break;
				case TGSI_SEMANTIC_COLOR:
					ctx->colors_used++;
					break;
				case TGSI_SEMANTIC_POSITION:
					ctx->fragcoord_input = i;
					break;
				case TGSI_SEMANTIC_PRIMID:
					/* set this for now */
					ctx->shader->gs_prim_id_input = true;
					ctx->shader->ps_prim_id_input = i;
					break;
				}
				if (ctx->bc->chip_class >= EVERGREEN) {
					if ((r = evergreen_interp_input(ctx, i)))
						return r;
				}
			} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
				/* FIXME probably skip inputs if they aren't passed in the ring */
				ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
				ctx->next_ring_offset += 16;
				if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
					ctx->shader->gs_prim_id_input = true;
			}
		}
		ctx->shader->ninput += count;
		break;
	case TGSI_FILE_OUTPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->noutput + j;
			assert(i < ARRAY_SIZE(ctx->shader->output));
			ctx->shader->output[i].name = d->Semantic.Name;
			ctx->shader->output[i].sid = d->Semantic.Index + j;
			ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
			ctx->shader->output[i].interpolate = d->Interp.Interpolate;
			ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
			if (ctx->type == PIPE_SHADER_VERTEX ||
			    ctx->type == PIPE_SHADER_GEOMETRY ||
			    ctx->type == PIPE_SHADER_TESS_EVAL) {
				ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_CLIPDIST:
					break;
				case TGSI_SEMANTIC_PSIZE:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_point_size = 1;
					break;
				case TGSI_SEMANTIC_EDGEFLAG:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_edgeflag = 1;
					ctx->edgeflag_output = i;
					break;
				case TGSI_SEMANTIC_VIEWPORT_INDEX:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_viewport = 1;
					break;
				case TGSI_SEMANTIC_LAYER:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_layer = 1;
					break;
				case TGSI_SEMANTIC_CLIPVERTEX:
					ctx->clip_vertex_write = TRUE;
					ctx->cv_output = i;
					break;
				}
				if (ctx->type == PIPE_SHADER_GEOMETRY) {
					ctx->gs_out_ring_offset += 16;
				}
			} else if (ctx->type == PIPE_SHADER_FRAGMENT) {
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_COLOR:
					ctx->shader->nr_ps_max_color_exports++;
					break;
				}
			}
		}
		ctx->shader->noutput += count;
		break;
	case TGSI_FILE_TEMPORARY:
		if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
			if (d->Array.ArrayID) {
				r600_add_gpr_array(ctx->shader,
						   ctx->file_offset[TGSI_FILE_TEMPORARY] +
						   d->Range.First,
						   d->Range.Last - d->Range.First + 1, 0x0F);
			}
		}
		break;

	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_SAMPLER:
	case TGSI_FILE_SAMPLER_VIEW:
	case TGSI_FILE_ADDRESS:
	case TGSI_FILE_IMAGE:
		break;

	case TGSI_FILE_HW_ATOMIC:
		i = ctx->shader->nhwatomic_ranges;
		ctx->shader->atomics[i].start = d->Range.First;
		ctx->shader->atomics[i].end = d->Range.Last;
		ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + ctx->shader->nhwatomic;
		ctx->shader->atomics[i].array_id = d->Array.ArrayID;
		ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
		ctx->shader->nhwatomic_ranges++;
		ctx->shader->nhwatomic += count;
		break;

	case TGSI_FILE_SYSTEM_VALUE:
		if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
		    d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
		    d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
			break; /* Already handled from allocate_system_value_inputs */
		} else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
			if (!ctx->native_integers) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));

				alu.op = ALU_OP1_INT_TO_FLT;
				alu.src[0].sel = 0;
				alu.src[0].chan = 3;

				alu.dst.sel = 0;
				alu.dst.chan = 3;
				alu.dst.write = 1;
				alu.last = 1;

				if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
					return r;
			}
			break;
		} else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
			break;
		else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
			break;
		else if (d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ||
			 d->Semantic.Name == TGSI_SEMANTIC_TESSOUTER) {
			int param = r600_get_lds_unique_index(d->Semantic.Name, 0);
			int dreg = d->Semantic.Name == TGSI_SEMANTIC_TESSINNER ? 3 : 2;
			unsigned temp_reg = r600_get_temp(ctx);

			r = get_lds_offset0(ctx, 2, temp_reg, true);
			if (r)
				return r;

			r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
					   temp_reg, 0,
					   temp_reg, 0,
					   V_SQ_ALU_SRC_LITERAL, param * 16);
			if (r)
				return r;

			do_lds_fetch_values(ctx, temp_reg, dreg);
		}
		else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
			/* MOV r1.x, r0.x;
			   MOV r1.y, r0.y;
			*/
			for (i = 0; i < 2; i++) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = 0;
				alu.src[0].chan = 0 + i;
				alu.dst.sel = 1;
				alu.dst.chan = 0 + i;
				alu.dst.write = 1;
				alu.last = (i == 1) ? 1 : 0;
				if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
					return r;
			}
			/* ADD r1.z, 1.0f, -r0.x */
			struct r600_bytecode_alu alu;
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP2_ADD;
			alu.src[0].sel = V_SQ_ALU_SRC_1;
			alu.src[1].sel = 1;
			alu.src[1].chan = 0;
			alu.src[1].neg = 1;
			alu.dst.sel = 1;
			alu.dst.chan = 2;
			alu.dst.write = 1;
			alu.last = 1;
			if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
				return r;

			/* ADD r1.z, r1.z, -r1.y */
			alu.op = ALU_OP2_ADD;
			alu.src[0].sel = 1;
			alu.src[0].chan = 2;
			alu.src[1].sel = 1;
			alu.src[1].chan = 1;
			alu.src[1].neg = 1;
			alu.dst.sel = 1;
			alu.dst.chan = 2;
			alu.dst.write = 1;
			alu.last = 1;
			if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
				return r;
			break;
		}
		break;
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}

static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
{
	struct tgsi_parse_context parse;
	struct {
		boolean enabled;
		int *reg;
		unsigned name, alternate_name;
	} inputs[2] = {
		{ false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */

		{ false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
	};
	int i, k, num_regs = 0;

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					inputs[1].enabled = true; /* needs SAMPLEID */
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					/* Needs sample positions, currently those are always available */
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				if (k >= 0)
					ctx->eg_interpolators[k].enabled = true;
			}
		} else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
			struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
			if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
				for (k = 0; k < ARRAY_SIZE(inputs); k++) {
					if (d->Semantic.Name == inputs[k].name ||
					    d->Semantic.Name == inputs[k].alternate_name) {
						inputs[k].enabled = true;
					}
				}
			}
		}
	}

	tgsi_parse_free(&parse);

	for (i = 0; i < ARRAY_SIZE(inputs); i++) {
		boolean enabled = inputs[i].enabled;
		int *reg = inputs[i].reg;
		unsigned name = inputs[i].name;

		if (enabled) {
			int gpr = gpr_offset + num_regs++;
			ctx->shader->nsys_inputs++;

			// add to inputs, allocate a gpr
			k = ctx->shader->ninput++;
			ctx->shader->input[k].name = name;
			ctx->shader->input[k].sid = 0;
			ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
			ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
			*reg = ctx->shader->input[k].gpr = gpr;
		}
	}

	return gpr_offset + num_regs;
}

/*
 * for evergreen we need to scan the shader to find the number of GPRs we need to
 * reserve for interpolation and system values
 *
 * we need to know if we are going to emit
 * any sample or centroid inputs
 * if perspective and linear are required
 */
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
	unsigned i;
	int num_baryc;
	struct tgsi_parse_context parse;

	memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));

	for (i = 0; i < ctx->info.num_inputs; i++) {
		int k;
		/* skip position/face/mask/sampleid */
		if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
			continue;

		k = eg_get_interpolator_index(
			ctx->info.input_interpolate[i],
			ctx->info.input_interpolate_loc[i]);
		if (k >= 0)
			ctx->eg_interpolators[k].enabled = TRUE;
	}

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				if (k >= 0)
					ctx->eg_interpolators[k].enabled = true;
			}
		}
	}

	tgsi_parse_free(&parse);

	/* assign gpr to each interpolator according to priority */
	num_baryc = 0;
	for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
		if (ctx->eg_interpolators[i].enabled) {
			ctx->eg_interpolators[i].ij_index = num_baryc;
			num_baryc++;
		}
	}

	/* XXX PULL MODEL and LINE STIPPLE */

	num_baryc = (num_baryc + 1) >> 1;
	return allocate_system_value_inputs(ctx, num_baryc);
}

/* sample_id_sel == NULL means fetch for current sample */
static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
{
	struct r600_bytecode_vtx vtx;
	int r, t1;

	assert(ctx->fixed_pt_position_gpr != -1);

	t1 = r600_get_temp(ctx);

	memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
	vtx.op = FETCH_OP_VFETCH;
	vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	if (sample_id == NULL) {
		vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
		vtx.src_sel_x = 3;
	}
	else {
		struct r600_bytecode_alu alu;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
		alu.dst.sel = t1;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		vtx.src_gpr = t1;
		vtx.src_sel_x = 0;
	}
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = t1;
	vtx.dst_sel_x = 0;
	vtx.dst_sel_y = 1;
	vtx.dst_sel_z = 2;
	vtx.dst_sel_w = 3;
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2;
	vtx.format_comp_all = 1;
	vtx.use_const_fields = 0;
	vtx.offset = 1; // first element is size of buffer
	vtx.endian = r600_endian_swap(32);
	vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */

	r = r600_bytecode_add_vtx(ctx->bc, &vtx);
	if (r)
		return r;

	return t1;
}

static void tgsi_src(struct r600_shader_ctx *ctx,
		     const struct tgsi_full_src_register *tgsi_src,
		     struct r600_shader_src *r600_src)
{
	memset(r600_src, 0, sizeof(*r600_src));
	r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
	r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
	r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
	r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
	r600_src->neg = tgsi_src->Register.Negate;
	r600_src->abs = tgsi_src->Register.Absolute;

	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		int index;
		if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
		    (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
		    (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {

			index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
			r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg, r600_src->abs);
			if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
				return;
		}
		index = tgsi_src->Register.Index;
		r600_src->sel = V_SQ_ALU_SRC_LITERAL;
		memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
	} else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
		if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
			r600_src->swizzle[0] = 2; // Z value
			r600_src->swizzle[1] = 2;
			r600_src->swizzle[2] = 2;
			r600_src->swizzle[3] = 2;
			r600_src->sel = ctx->face_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
			r600_src->swizzle[0] = 3; // W value
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = ctx->fixed_pt_position_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 1;
			r600_src->swizzle[2] = 4;
			r600_src->swizzle[3] = 4;
			r600_src->sel = load_sample_position(ctx, NULL, -1);
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 0;
			r600_src->swizzle[2] = 0;
			r600_src->swizzle[3] = 0;
			r600_src->sel = 0;
		} else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 1;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
			r600_src->swizzle[0] = 2;
			r600_src->swizzle[1] = 2;
			r600_src->swizzle[2] = 2;
			r600_src->swizzle[3] = 2;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSCOORD) {
			r600_src->sel = 1;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSINNER) {
			r600_src->sel = 3;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSOUTER) {
			r600_src->sel = 2;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTICESIN) {
			if (ctx->type == PIPE_SHADER_TESS_CTRL) {
				r600_src->sel = ctx->tess_input_info;
				r600_src->swizzle[0] = 2;
				r600_src->swizzle[1] = 2;
				r600_src->swizzle[2] = 2;
				r600_src->swizzle[3] = 2;
			} else {
				r600_src->sel = ctx->tess_input_info;
				r600_src->swizzle[0] = 3;
				r600_src->swizzle[1] = 3;
				r600_src->swizzle[2] = 3;
				r600_src->swizzle[3] = 3;
			}
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
			r600_src->sel = 0;
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 0;
			r600_src->swizzle[2] = 0;
			r600_src->swizzle[3] = 0;
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
			r600_src->sel = 0;
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
		}
	} else {
		if (tgsi_src->Register.Indirect)
			r600_src->rel = V_SQ_REL_RELATIVE;
		r600_src->sel = tgsi_src->Register.Index;
		r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	}
	if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
		if (tgsi_src->Register.Dimension) {
			r600_src->kc_bank = tgsi_src->Dimension.Index;
			if (tgsi_src->Dimension.Indirect) {
				r600_src->kc_rel = 1;
			}
		}
	}
}

static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
				unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
				unsigned int dst_reg)
{
	struct r600_bytecode_vtx vtx;
	unsigned int ar_reg;
	int r;

	if (offset) {
		struct r600_bytecode_alu alu;

		memset(&alu, 0, sizeof(alu));

		alu.op = ALU_OP2_ADD_INT;
		alu.src[0].sel = ctx->bc->ar_reg;
		alu.src[0].chan = ar_chan;

		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = offset;

		alu.dst.sel = dst_reg;
		alu.dst.chan = ar_chan;
		alu.dst.write = 1;
		alu.last = 1;

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;

		ar_reg = dst_reg;
	} else {
		ar_reg = ctx->bc->ar_reg;
	}

	memset(&vtx, 0, sizeof(vtx));
	vtx.buffer_id = cb_idx;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	vtx.src_gpr = ar_reg;
	vtx.src_sel_x = ar_chan;
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = dst_reg;
	vtx.dst_sel_x = 0; /* SEL_X */
	vtx.dst_sel_y = 1; /* SEL_Y */
	vtx.dst_sel_z = 2; /* SEL_Z */
	vtx.dst_sel_w = 3; /* SEL_W */
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
	vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
	vtx.endian = r600_endian_swap(32);
	vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;

	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
		return r;

	return 0;
}

static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
{
	struct r600_bytecode_vtx vtx;
	int r;
	unsigned index = src->Register.Index;
	unsigned vtx_id = src->Dimension.Index;
	int offset_reg = ctx->gs_rotated_input[vtx_id / 3];
	int offset_chan = vtx_id % 3;
	int t2 = 0;

	/* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
	 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */

	if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2)
		offset_chan = 3;

	if (src->Dimension.Indirect || src->Register.Indirect)
		t2 = r600_get_temp(ctx);

	if (src->Dimension.Indirect) {
		int treg[3];
		struct r600_bytecode_alu alu;
		int r, i;
		unsigned addr_reg;
		addr_reg = get_address_file_reg(ctx, src->DimIndirect.Index);
		if (src->DimIndirect.Index > 0) {
			r = single_alu_op2(ctx, ALU_OP1_MOV,
					   ctx->bc->ar_reg, 0,
					   addr_reg, 0,
					   0, 0);
			if (r)
				return r;
		}
		/*
		   we have to put the R0.x/y/w into Rt.x Rt+1.x Rt+2.x then index reg from Rt.
		   at least this is what fglrx seems to do. */
		for (i = 0; i < 3; i++) {
			treg[i] = r600_get_temp(ctx);
		}
		r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);

		for (i = 0; i < 3; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_MOV;
			alu.src[0].sel = ctx->gs_rotated_input[0];
			alu.src[0].chan = i == 2 ? 3 : i;
			alu.dst.sel = treg[i];
			alu.dst.chan = 0;
			alu.dst.write = 1;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		alu.src[0].sel = treg[0];
		alu.src[0].rel = 1;
		alu.dst.sel = t2;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
		offset_reg = t2;
		offset_chan = 0;
	}

	if (src->Register.Indirect) {
		int addr_reg;
		unsigned first = ctx->info.input_array_first[src->Indirect.ArrayID];

		addr_reg = get_address_file_reg(ctx, src->Indirect.Index);

		/* pull the value from index_reg */
		r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
				   t2, 1,
				   addr_reg, 0,
				   V_SQ_ALU_SRC_LITERAL, first);
		if (r)
			return r;
		r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
				   t2, 0,
				   t2, 1,
				   V_SQ_ALU_SRC_LITERAL, 4,
				   offset_reg, offset_chan);
		if (r)
			return r;
		offset_reg = t2;
		offset_chan = 0;
		index = src->Register.Index - first;
	}

	memset(&vtx, 0, sizeof(vtx));
	vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	vtx.src_gpr = offset_reg;
	vtx.src_sel_x = offset_chan;
	vtx.offset = index * 16; /* bytes */
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = dst_reg;
	vtx.dst_sel_x = 0; /* SEL_X */
	vtx.dst_sel_y = 1; /* SEL_Y */
	vtx.dst_sel_z = 2; /* SEL_Z */
	vtx.dst_sel_w = 3; /* SEL_W */
	if (ctx->bc->chip_class >= EVERGREEN) {
		vtx.use_const_fields = 1;
	} else {
		vtx.data_format = FMT_32_32_32_32_FLOAT;
	}

	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
		return r;

	return 0;
}

static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	unsigned i;

	for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
		struct tgsi_full_src_register *src = &inst->Src[i];

		if (src->Register.File == TGSI_FILE_INPUT) {
			if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
				/* primitive id is in R0.z */
				ctx->src[i].sel = 0;
				ctx->src[i].swizzle[0] = 2;
			}
		}
		if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
			int treg = r600_get_temp(ctx);

			fetch_gs_input(ctx, src, treg);
			ctx->src[i].sel = treg;
			ctx->src[i].rel = 0;
		}
	}
	return 0;
}


/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS(HS) inputs
 * TCS(HS) outputs = TES(DS) inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
/* this will return with the dw address in temp_reg.x */
static int r600_get_byte_address(struct r600_shader_ctx *ctx, int temp_reg,
				 const struct tgsi_full_dst_register *dst,
				 const struct tgsi_full_src_register *src,
				 int stride_bytes_reg, int stride_bytes_chan)
{
	struct tgsi_full_dst_register reg;
	ubyte *name, *index, *array_first;
	int r;
	int param;
	struct tgsi_shader_info *info = &ctx->info;
	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		int sel, chan;
		if (reg.Dimension.Indirect) {
			unsigned addr_reg;
			assert(reg.DimIndirect.File == TGSI_FILE_ADDRESS);

			addr_reg = get_address_file_reg(ctx, reg.DimIndirect.Index);
			/* pull the value from index_reg */
			sel = addr_reg;
			chan = 0;
		} else {
			sel = V_SQ_ALU_SRC_LITERAL;
			chan = reg.Dimension.Index;
		}

		r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
				   temp_reg, 0,
				   stride_bytes_reg, stride_bytes_chan,
				   sel, chan,
				   temp_reg, 0);
		if (r)
			return r;
	}

	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return -1;
	}
	if (reg.Register.Indirect) {
		int addr_reg;
		int first;
		/* Add the relative address of the element. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		addr_reg = get_address_file_reg(ctx, reg.Indirect.Index);

		/* pull the value from index_reg */
		r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
				   temp_reg, 0,
				   V_SQ_ALU_SRC_LITERAL, 16,
				   addr_reg, 0,
				   temp_reg, 0);
		if (r)
			return r;

		param = r600_get_lds_unique_index(name[first],
						  index[first]);
	} else {
		param = r600_get_lds_unique_index(name[reg.Register.Index],
						  index[reg.Register.Index]);
	}

	/* add to base_addr - passed in temp_reg.x */
	if (param) {
		r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
				   temp_reg, 0,
				   temp_reg, 0,
				   V_SQ_ALU_SRC_LITERAL, param * 16);
		if (r)
			return r;
	}
	return 0;
}
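/* Net effect, reading off the steps above: temp_reg.x ends up holding
 *   incoming base + vertex_index * stride_bytes + 16 * rel_addr + 16 * param,
 * i.e. a byte address with one 16-byte vec4 slot per LDS unique index. */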

static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
			       unsigned dst_reg)
{
	struct r600_bytecode_alu alu;
	int r, i;

	if ((ctx->bc->cf_last->ndw>>1) >= 0x60)
		ctx->bc->force_add_cf = 1;
	for (i = 1; i < 4; i++) {
		r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
				   temp_reg, i,
				   temp_reg, 0,
				   V_SQ_ALU_SRC_LITERAL, 4 * i);
		if (r)
			return r;
	}
	for (i = 0; i < 4; i++) {
		/* emit an LDS_READ_RET */
		memset(&alu, 0, sizeof(alu));
		alu.op = LDS_OP1_LDS_READ_RET;
		alu.src[0].sel = temp_reg;
		alu.src[0].chan = i;
		alu.src[1].sel = V_SQ_ALU_SRC_0;
		alu.src[2].sel = V_SQ_ALU_SRC_0;
		alu.dst.chan = 0;
		alu.is_lds_idx_op = true;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	for (i = 0; i < 4; i++) {
		/* then read from LDS_OQ_A_POP */
		memset(&alu, 0, sizeof(alu));

		alu.op = ALU_OP1_MOV;
		alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
		alu.src[0].chan = 0;
		alu.dst.sel = dst_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
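/* Sketch of what one fetch expands to (temp_reg.x must already hold the
 * base address): three ADD_INTs derive the remaining addresses in .yzw,
 * four LDS_READ_RETs queue the loads, and four MOVs pop the results from
 * the LDS_OQ_A output queue into dst_reg.xyzw. */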
1793
1794 static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1795 {
1796 int r;
1797 unsigned temp_reg = r600_get_temp(ctx);
1798
1799 r = get_lds_offset0(ctx, 2, temp_reg,
1800 !src->Register.Dimension);
1801 if (r)
1802 return r;
1803
1804 /* the base address is now in temp.x */
1805 r = r600_get_byte_address(ctx, temp_reg,
1806 NULL, src, ctx->tess_output_info, 1);
1807 if (r)
1808 return r;
1809
1810 r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
1811 if (r)
1812 return r;
1813 return 0;
1814 }
1815
1816 static int fetch_tcs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1817 {
1818 int r;
1819 unsigned temp_reg = r600_get_temp(ctx);
1820
1821 /* t.x = ips * r0.y */
1822 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
1823 temp_reg, 0,
1824 ctx->tess_input_info, 0,
1825 0, 1);
1826
1827 if (r)
1828 return r;
1829
1830 /* the base address is now in temp.x */
1831 r = r600_get_byte_address(ctx, temp_reg,
1832 NULL, src, ctx->tess_input_info, 1);
1833 if (r)
1834 return r;
1835
1836 r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
1837 if (r)
1838 return r;
1839 return 0;
1840 }
1841
1842 static int fetch_tcs_output(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1843 {
1844 int r;
1845 unsigned temp_reg = r600_get_temp(ctx);
1846
1847 r = get_lds_offset0(ctx, 1, temp_reg,
1848 !src->Register.Dimension);
1849 if (r)
1850 return r;
1851 /* the base address is now in temp.x */
1852 r = r600_get_byte_address(ctx, temp_reg,
1853 NULL, src,
1854 ctx->tess_output_info, 1);
1855 if (r)
1856 return r;
1857
1858 r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
1859 if (r)
1860 return r;
1861 return 0;
1862 }
1863
1864 static int tgsi_split_lds_inputs(struct r600_shader_ctx *ctx)
1865 {
1866 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1867 unsigned i;
1868
1869 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1870 struct tgsi_full_src_register *src = &inst->Src[i];
1871
1872 if (ctx->type == PIPE_SHADER_TESS_EVAL && src->Register.File == TGSI_FILE_INPUT) {
1873 int treg = r600_get_temp(ctx);
1874 fetch_tes_input(ctx, src, treg);
1875 ctx->src[i].sel = treg;
1876 ctx->src[i].rel = 0;
1877 }
1878 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_INPUT) {
1879 int treg = r600_get_temp(ctx);
1880 fetch_tcs_input(ctx, src, treg);
1881 ctx->src[i].sel = treg;
1882 ctx->src[i].rel = 0;
1883 }
1884 if (ctx->type == PIPE_SHADER_TESS_CTRL && src->Register.File == TGSI_FILE_OUTPUT) {
1885 int treg = r600_get_temp(ctx);
1886 fetch_tcs_output(ctx, src, treg);
1887 ctx->src[i].sel = treg;
1888 ctx->src[i].rel = 0;
1889 }
1890 }
1891 return 0;
1892 }
1893
1894 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
1895 {
1896 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1897 struct r600_bytecode_alu alu;
1898 int i, j, k, nconst, r;
1899
1900 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
1901 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
1902 nconst++;
1903 }
1904 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
1905 }
1906 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
1907 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
1908 continue;
1909 }
1910
1911 if (ctx->src[i].rel) {
1912 int chan = inst->Src[i].Indirect.Swizzle;
1913 int treg = r600_get_temp(ctx);
1914 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
1915 return r;
1916
1917 ctx->src[i].kc_bank = 0;
1918 ctx->src[i].kc_rel = 0;
1919 ctx->src[i].sel = treg;
1920 ctx->src[i].rel = 0;
1921 j--;
1922 } else if (j > 0) {
1923 int treg = r600_get_temp(ctx);
1924 for (k = 0; k < 4; k++) {
1925 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1926 alu.op = ALU_OP1_MOV;
1927 alu.src[0].sel = ctx->src[i].sel;
1928 alu.src[0].chan = k;
1929 alu.src[0].rel = ctx->src[i].rel;
1930 alu.src[0].kc_bank = ctx->src[i].kc_bank;
1931 alu.src[0].kc_rel = ctx->src[i].kc_rel;
1932 alu.dst.sel = treg;
1933 alu.dst.chan = k;
1934 alu.dst.write = 1;
1935 if (k == 3)
1936 alu.last = 1;
1937 r = r600_bytecode_add_alu(ctx->bc, &alu);
1938 if (r)
1939 return r;
1940 }
1941 ctx->src[i].sel = treg;
1942 ctx->src[i].rel = 0;
1943 j--;
1944 }
1945 }
1946 return 0;
1947 }
1948
1949 /* Move any immediate into a temp register: the trig lowering needs the instruction's literal slots for its PI-related constants. */
1950 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
1951 {
1952 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1953 struct r600_bytecode_alu alu;
1954 int i, j, k, nliteral, r;
1955
1956 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
1957 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1958 nliteral++;
1959 }
1960 }
1961 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
1962 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1963 int treg = r600_get_temp(ctx);
1964 for (k = 0; k < 4; k++) {
1965 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1966 alu.op = ALU_OP1_MOV;
1967 alu.src[0].sel = ctx->src[i].sel;
1968 alu.src[0].chan = k;
1969 alu.src[0].value = ctx->src[i].value[k];
1970 alu.dst.sel = treg;
1971 alu.dst.chan = k;
1972 alu.dst.write = 1;
1973 if (k == 3)
1974 alu.last = 1;
1975 r = r600_bytecode_add_alu(ctx->bc, &alu);
1976 if (r)
1977 return r;
1978 }
1979 ctx->src[i].sel = treg;
1980 j--;
1981 }
1982 }
1983 return 0;
1984 }
1985
1986 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
1987 {
1988 int i, r, count = ctx->shader->ninput;
1989
1990 for (i = 0; i < count; i++) {
1991 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
1992 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
1993 if (r)
1994 return r;
1995 }
1996 }
1997 return 0;
1998 }
1999
2000 static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
2001 int stream, unsigned *stream_item_size)
2002 {
2003 unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
2004 unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
2005 int i, j, r;
2006
2007 /* Sanity checking. */
2008 if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
2009 R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
2010 r = -EINVAL;
2011 goto out_err;
2012 }
2013 for (i = 0; i < so->num_outputs; i++) {
2014 if (so->output[i].output_buffer >= 4) {
2015 R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
2016 so->output[i].output_buffer);
2017 r = -EINVAL;
2018 goto out_err;
2019 }
2020 }
2021
2022 /* Initialize locations where the outputs are stored. */
2023 for (i = 0; i < so->num_outputs; i++) {
2024
2025 so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
2026 start_comp[i] = so->output[i].start_component;
2027 /* Lower outputs with dst_offset < start_component.
2028 *
2029 * We can only output 4D vectors with a write mask, e.g. we can
2030 * only output the W component at offset 3, etc. If we want
2031 * to store Y, Z, or W at buffer offset 0, we need to use MOV
2032 * to move it to X and output X. */
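/* Worked example (illustrative): storing output.y at buffer offset 0 has
 * start_component 1 and dst_offset 0, so we emit MOV tmp.x, out.y and then
 * export tmp with start_comp 0 instead. */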
2033 if (so->output[i].dst_offset < so->output[i].start_component) {
2034 unsigned tmp = r600_get_temp(ctx);
2035
2036 for (j = 0; j < so->output[i].num_components; j++) {
2037 struct r600_bytecode_alu alu;
2038 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2039 alu.op = ALU_OP1_MOV;
2040 alu.src[0].sel = so_gpr[i];
2041 alu.src[0].chan = so->output[i].start_component + j;
2042
2043 alu.dst.sel = tmp;
2044 alu.dst.chan = j;
2045 alu.dst.write = 1;
2046 if (j == so->output[i].num_components - 1)
2047 alu.last = 1;
2048 r = r600_bytecode_add_alu(ctx->bc, &alu);
2049 if (r)
2050 return r;
2051 }
2052 start_comp[i] = 0;
2053 so_gpr[i] = tmp;
2054 }
2055 }
2056
2057 /* Write outputs to buffers. */
2058 for (i = 0; i < so->num_outputs; i++) {
2059 struct r600_bytecode_output output;
2060
2061 if (stream != -1 && stream != so->output[i].stream)
2062 continue;
2063
2064 memset(&output, 0, sizeof(struct r600_bytecode_output));
2065 output.gpr = so_gpr[i];
2066 output.elem_size = so->output[i].num_components - 1;
2067 if (output.elem_size == 2)
2068 output.elem_size = 3; // 3 not supported, write 4 with junk at end
2069 output.array_base = so->output[i].dst_offset - start_comp[i];
2070 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2071 output.burst_count = 1;
2072 /* array_size is an upper limit for the burst_count
2073 * with MEM_STREAM instructions */
2074 output.array_size = 0xFFF;
2075 output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];
2076
2077 if (ctx->bc->chip_class >= EVERGREEN) {
2078 switch (so->output[i].output_buffer) {
2079 case 0:
2080 output.op = CF_OP_MEM_STREAM0_BUF0;
2081 break;
2082 case 1:
2083 output.op = CF_OP_MEM_STREAM0_BUF1;
2084 break;
2085 case 2:
2086 output.op = CF_OP_MEM_STREAM0_BUF2;
2087 break;
2088 case 3:
2089 output.op = CF_OP_MEM_STREAM0_BUF3;
2090 break;
2091 }
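/* The EG CF opcodes are assumed to be laid out so that
 * MEM_STREAMn_BUFm == MEM_STREAM0_BUFm + n * 4; adding stream * 4
 * below therefore selects the matching stream, and the assert guards
 * that assumption. */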
2092 output.op += so->output[i].stream * 4;
2093 assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
2094 ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
2095 } else {
2096 switch (so->output[i].output_buffer) {
2097 case 0:
2098 output.op = CF_OP_MEM_STREAM0;
2099 break;
2100 case 1:
2101 output.op = CF_OP_MEM_STREAM1;
2102 break;
2103 case 2:
2104 output.op = CF_OP_MEM_STREAM2;
2105 break;
2106 case 3:
2107 output.op = CF_OP_MEM_STREAM3;
2108 break;
2109 }
2110 ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
2111 }
2112 r = r600_bytecode_add_output(ctx->bc, &output);
2113 if (r)
2114 goto out_err;
2115 }
2116 return 0;
2117 out_err:
2118 return r;
2119 }
2120
2121 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
2122 {
2123 struct r600_bytecode_alu alu;
2124 unsigned reg;
2125
2126 if (!ctx->shader->vs_out_edgeflag)
2127 return;
2128
2129 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
2130
2131 /* clamp(x, 0, 1) */
2132 memset(&alu, 0, sizeof(alu));
2133 alu.op = ALU_OP1_MOV;
2134 alu.src[0].sel = reg;
2135 alu.dst.sel = reg;
2136 alu.dst.write = 1;
2137 alu.dst.clamp = 1;
2138 alu.last = 1;
2139 r600_bytecode_add_alu(ctx->bc, &alu);
2140
2141 memset(&alu, 0, sizeof(alu));
2142 alu.op = ALU_OP1_FLT_TO_INT;
2143 alu.src[0].sel = reg;
2144 alu.dst.sel = reg;
2145 alu.dst.write = 1;
2146 alu.last = 1;
2147 r600_bytecode_add_alu(ctx->bc, &alu);
2148 }
2149
2150 static int generate_gs_copy_shader(struct r600_context *rctx,
2151 struct r600_pipe_shader *gs,
2152 struct pipe_stream_output_info *so)
2153 {
2154 struct r600_shader_ctx ctx = {};
2155 struct r600_shader *gs_shader = &gs->shader;
2156 struct r600_pipe_shader *cshader;
2157 int ocnt = gs_shader->noutput;
2158 struct r600_bytecode_alu alu;
2159 struct r600_bytecode_vtx vtx;
2160 struct r600_bytecode_output output;
2161 struct r600_bytecode_cf *cf_jump, *cf_pop,
2162 *last_exp_pos = NULL, *last_exp_param = NULL;
2163 int i, j, next_clip_pos = 61, next_param = 0;
2164 int ring;
2165 bool only_ring_0 = true;
2166 cshader = calloc(1, sizeof(struct r600_pipe_shader));
2167 if (!cshader)
2168 return 0;
2169
2170 memcpy(cshader->shader.output, gs_shader->output, ocnt *
2171 sizeof(struct r600_shader_io));
2172
2173 cshader->shader.noutput = ocnt;
2174
2175 ctx.shader = &cshader->shader;
2176 ctx.bc = &ctx.shader->bc;
2177 ctx.type = ctx.bc->type = PIPE_SHADER_VERTEX;
2178
2179 r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
2180 rctx->screen->has_compressed_msaa_texturing);
2181
2182 ctx.bc->isa = rctx->isa;
2183
2184 cf_jump = NULL;
2185 memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));
2186
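/* The copy shader's R0.x is assumed to pack the GSVS ring offset in
 * bits 0-29 and the stream id in bits 30-31. The two ALU ops below
 * share one ALU group (only the second sets "last"), so the shift
 * still reads the pre-AND value of R0.x. */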
2187 /* R0.x = R0.x & 0x3fffffff */
2188 memset(&alu, 0, sizeof(alu));
2189 alu.op = ALU_OP2_AND_INT;
2190 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2191 alu.src[1].value = 0x3fffffff;
2192 alu.dst.write = 1;
2193 r600_bytecode_add_alu(ctx.bc, &alu);
2194
2195 /* R0.y = R0.x >> 30 */
2196 memset(&alu, 0, sizeof(alu));
2197 alu.op = ALU_OP2_LSHR_INT;
2198 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2199 alu.src[1].value = 0x1e;
2200 alu.dst.chan = 1;
2201 alu.dst.write = 1;
2202 alu.last = 1;
2203 r600_bytecode_add_alu(ctx.bc, &alu);
2204
2205 /* fetch vertex data from GSVS ring */
2206 for (i = 0; i < ocnt; ++i) {
2207 struct r600_shader_io *out = &ctx.shader->output[i];
2208
2209 out->gpr = i + 1;
2210 out->ring_offset = i * 16;
2211
2212 memset(&vtx, 0, sizeof(vtx));
2213 vtx.op = FETCH_OP_VFETCH;
2214 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
2215 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2216 vtx.mega_fetch_count = 16;
2217 vtx.offset = out->ring_offset;
2218 vtx.dst_gpr = out->gpr;
2219 vtx.src_gpr = 0;
2220 vtx.dst_sel_x = 0;
2221 vtx.dst_sel_y = 1;
2222 vtx.dst_sel_z = 2;
2223 vtx.dst_sel_w = 3;
2224 if (rctx->b.chip_class >= EVERGREEN) {
2225 vtx.use_const_fields = 1;
2226 } else {
2227 vtx.data_format = FMT_32_32_32_32_FLOAT;
2228 }
2229
2230 r600_bytecode_add_vtx(ctx.bc, &vtx);
2231 }
2232 ctx.temp_reg = i + 1;
2233 for (ring = 3; ring >= 0; --ring) {
2234 bool enabled = false;
2235 for (i = 0; i < so->num_outputs; i++) {
2236 if (so->output[i].stream == ring) {
2237 enabled = true;
2238 if (ring > 0)
2239 only_ring_0 = false;
2240 break;
2241 }
2242 }
2243 if (ring != 0 && !enabled) {
2244 cshader->shader.ring_item_sizes[ring] = 0;
2245 continue;
2246 }
2247
2248 if (cf_jump) {
2249 // Patch up jump label
2250 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2251 cf_pop = ctx.bc->cf_last;
2252
2253 cf_jump->cf_addr = cf_pop->id + 2;
2254 cf_jump->pop_count = 1;
2255 cf_pop->cf_addr = cf_pop->id + 2;
2256 cf_pop->pop_count = 1;
2257 }
2258
2259 /* PRED_SETE_INT __, R0.y, ring */
2260 memset(&alu, 0, sizeof(alu));
2261 alu.op = ALU_OP2_PRED_SETE_INT;
2262 alu.src[0].chan = 1;
2263 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2264 alu.src[1].value = ring;
2265 alu.execute_mask = 1;
2266 alu.update_pred = 1;
2267 alu.last = 1;
2268 r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2269
2270 r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
2271 cf_jump = ctx.bc->cf_last;
2272
2273 if (enabled)
2274 emit_streamout(&ctx, so, only_ring_0 ? -1 : ring, &cshader->shader.ring_item_sizes[ring]);
2275 cshader->shader.ring_item_sizes[ring] = ocnt * 16;
2276 }
2277
2278 /* the bytecode builder adds NOPs on R600 - do the same here */
2279 if (ctx.bc->chip_class == R600) {
2280 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2281 alu.op = ALU_OP0_NOP;
2282 alu.last = 1;
2283 r600_bytecode_add_alu(ctx.bc, &alu);
2284
2285 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2286 }
2287
2288 /* export vertex data */
2289 /* XXX factor out common code with r600_shader_from_tgsi ? */
2290 for (i = 0; i < ocnt; ++i) {
2291 struct r600_shader_io *out = &ctx.shader->output[i];
2292 bool instream0 = true;
2293 if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
2294 continue;
2295
2296 for (j = 0; j < so->num_outputs; j++) {
2297 if (so->output[j].register_index == i) {
2298 if (so->output[j].stream == 0)
2299 break;
2300 if (so->output[j].stream > 0)
2301 instream0 = false;
2302 }
2303 }
2304 if (!instream0)
2305 continue;
2306 memset(&output, 0, sizeof(output));
2307 output.gpr = out->gpr;
2308 output.elem_size = 3;
2309 output.swizzle_x = 0;
2310 output.swizzle_y = 1;
2311 output.swizzle_z = 2;
2312 output.swizzle_w = 3;
2313 output.burst_count = 1;
2314 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2315 output.op = CF_OP_EXPORT;
2316 switch (out->name) {
2317 case TGSI_SEMANTIC_POSITION:
2318 output.array_base = 60;
2319 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2320 break;
2321
2322 case TGSI_SEMANTIC_PSIZE:
2323 output.array_base = 61;
2324 if (next_clip_pos == 61)
2325 next_clip_pos = 62;
2326 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2327 output.swizzle_y = 7;
2328 output.swizzle_z = 7;
2329 output.swizzle_w = 7;
2330 ctx.shader->vs_out_misc_write = 1;
2331 ctx.shader->vs_out_point_size = 1;
2332 break;
2333 case TGSI_SEMANTIC_LAYER:
2334 if (out->spi_sid) {
2335 /* duplicate it as PARAM to pass to the pixel shader */
2336 output.array_base = next_param++;
2337 r600_bytecode_add_output(ctx.bc, &output);
2338 last_exp_param = ctx.bc->cf_last;
2339 }
2340 output.array_base = 61;
2341 if (next_clip_pos == 61)
2342 next_clip_pos = 62;
2343 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2344 output.swizzle_x = 7;
2345 output.swizzle_y = 7;
2346 output.swizzle_z = 0;
2347 output.swizzle_w = 7;
2348 ctx.shader->vs_out_misc_write = 1;
2349 ctx.shader->vs_out_layer = 1;
2350 break;
2351 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2352 if (out->spi_sid) {
2353 /* duplicate it as PARAM to pass to the pixel shader */
2354 output.array_base = next_param++;
2355 r600_bytecode_add_output(ctx.bc, &output);
2356 last_exp_param = ctx.bc->cf_last;
2357 }
2358 output.array_base = 61;
2359 if (next_clip_pos == 61)
2360 next_clip_pos = 62;
2361 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2362 ctx.shader->vs_out_misc_write = 1;
2363 ctx.shader->vs_out_viewport = 1;
2364 output.swizzle_x = 7;
2365 output.swizzle_y = 7;
2366 output.swizzle_z = 7;
2367 output.swizzle_w = 0;
2368 break;
2369 case TGSI_SEMANTIC_CLIPDIST:
2370 /* spi_sid is 0 for clipdistance outputs that were generated
2371 * for clipvertex - we don't need to pass them to PS */
2372 ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
2373 ctx.shader->cull_dist_write = gs->shader.cull_dist_write;
2374 ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask;
2375 if (out->spi_sid) {
2376 /* duplicate it as PARAM to pass to the pixel shader */
2377 output.array_base = next_param++;
2378 r600_bytecode_add_output(ctx.bc, &output);
2379 last_exp_param = ctx.bc->cf_last;
2380 }
2381 output.array_base = next_clip_pos++;
2382 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2383 break;
2384 case TGSI_SEMANTIC_FOG:
2385 output.swizzle_y = 4; /* 0 */
2386 output.swizzle_z = 4; /* 0 */
2387 output.swizzle_w = 5; /* 1 */
2388 break;
2389 default:
2390 output.array_base = next_param++;
2391 break;
2392 }
2393 r600_bytecode_add_output(ctx.bc, &output);
2394 if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
2395 last_exp_param = ctx.bc->cf_last;
2396 else
2397 last_exp_pos = ctx.bc->cf_last;
2398 }
2399
2400 if (!last_exp_pos) {
2401 memset(&output, 0, sizeof(output));
2402 output.gpr = 0;
2403 output.elem_size = 3;
2404 output.swizzle_x = 7;
2405 output.swizzle_y = 7;
2406 output.swizzle_z = 7;
2407 output.swizzle_w = 7;
2408 output.burst_count = 1;
2409 output.type = 2;
2410 output.op = CF_OP_EXPORT;
2411 output.array_base = 60;
2412 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2413 r600_bytecode_add_output(ctx.bc, &output);
2414 last_exp_pos = ctx.bc->cf_last;
2415 }
2416
2417 if (!last_exp_param) {
2418 memset(&output, 0, sizeof(output));
2419 output.gpr = 0;
2420 output.elem_size = 3;
2421 output.swizzle_x = 7;
2422 output.swizzle_y = 7;
2423 output.swizzle_z = 7;
2424 output.swizzle_w = 7;
2425 output.burst_count = 1;
2426 output.type = 2;
2427 output.op = CF_OP_EXPORT;
2428 output.array_base = next_param++;
2429 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2430 r600_bytecode_add_output(ctx.bc, &output);
2431 last_exp_param = ctx.bc->cf_last;
2432 }
2433
2434 last_exp_pos->op = CF_OP_EXPORT_DONE;
2435 last_exp_param->op = CF_OP_EXPORT_DONE;
2436
2437 r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
2438 cf_pop = ctx.bc->cf_last;
2439
2440 cf_jump->cf_addr = cf_pop->id + 2;
2441 cf_jump->pop_count = 1;
2442 cf_pop->cf_addr = cf_pop->id + 2;
2443 cf_pop->pop_count = 1;
2444
2445 if (ctx.bc->chip_class == CAYMAN)
2446 cm_bytecode_add_cf_end(ctx.bc);
2447 else {
2448 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2449 ctx.bc->cf_last->end_of_program = 1;
2450 }
2451
2452 gs->gs_copy_shader = cshader;
2453 cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2454
2455 ctx.bc->nstack = 1;
2456
2457 return r600_bytecode_build(ctx.bc);
2458 }
2459
2460 static int emit_inc_ring_offset(struct r600_shader_ctx *ctx, int idx, bool ind)
2461 {
2462 if (ind) {
2463 struct r600_bytecode_alu alu;
2464 int r;
2465
2466 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2467 alu.op = ALU_OP2_ADD_INT;
2468 alu.src[0].sel = ctx->gs_export_gpr_tregs[idx];
2469 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2470 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
2471 alu.dst.sel = ctx->gs_export_gpr_tregs[idx];
2472 alu.dst.write = 1;
2473 alu.last = 1;
2474 r = r600_bytecode_add_alu(ctx->bc, &alu);
2475 if (r)
2476 return r;
2477 }
2478 return 0;
2479 }
2480
2481 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind)
2482 {
2483 struct r600_bytecode_output output;
2484 int i, k, ring_offset;
2485 int effective_stream = stream == -1 ? 0 : stream;
2486 int idx = 0;
2487
2488 for (i = 0; i < ctx->shader->noutput; i++) {
2489 if (ctx->gs_for_vs) {
2490 /* for ES we need to look up the corresponding ring offset expected by the GS
2491 * (map this output to a GS input by name and sid) */
2492 /* FIXME precompute offsets */
2493 ring_offset = -1;
2494 for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
2495 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
2496 struct r600_shader_io *out = &ctx->shader->output[i];
2497 if (in->name == out->name && in->sid == out->sid)
2498 ring_offset = in->ring_offset;
2499 }
2500
2501 if (ring_offset == -1)
2502 continue;
2503 } else {
2504 ring_offset = idx * 16;
2505 idx++;
2506 }
2507
2508 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
2509 continue;
2510 /* after parsing the input decls, next_ring_offset contains the total size
2511 * of a single vertex's data; gs_next_vertex is the current vertex index */
2512 if (!ind)
2513 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
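/* Illustrative example: with three vec4 outputs per vertex,
 * gs_out_ring_offset is 48 bytes, so output i of vertex 2 lands at
 * i * 16 + 48 * 2 bytes into the ring. */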
2514
2515 memset(&output, 0, sizeof(struct r600_bytecode_output));
2516 output.gpr = ctx->shader->output[i].gpr;
2517 output.elem_size = 3;
2518 output.comp_mask = 0xF;
2519 output.burst_count = 1;
2520
2521 if (ind)
2522 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
2523 else
2524 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
2525
2526 switch (stream) {
2527 default:
2528 case 0:
2529 output.op = CF_OP_MEM_RING; break;
2530 case 1:
2531 output.op = CF_OP_MEM_RING1; break;
2532 case 2:
2533 output.op = CF_OP_MEM_RING2; break;
2534 case 3:
2535 output.op = CF_OP_MEM_RING3; break;
2536 }
2537
2538 if (ind) {
2539 output.array_base = ring_offset >> 2; /* in dwords */
2540 output.array_size = 0xfff;
2541 output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
2542 } else
2543 output.array_base = ring_offset >> 2; /* in dwords */
2544 r600_bytecode_add_output(ctx->bc, &output);
2545 }
2546
2547 ++ctx->gs_next_vertex;
2548 return 0;
2549 }
2550
2551
2552 static int r600_fetch_tess_io_info(struct r600_shader_ctx *ctx)
2553 {
2554 int r;
2555 struct r600_bytecode_vtx vtx;
2556 int temp_val = ctx->temp_reg;
2557 /* zero temp_val.x; it supplies the fetch address for the VTX fetches below */
2558 r = single_alu_op2(ctx, ALU_OP1_MOV,
2559 temp_val, 0,
2560 V_SQ_ALU_SRC_LITERAL, 0,
2561 0, 0);
2562 if (r)
2563 return r;
2564
2565 /* used by VS/TCS */
2566 if (ctx->tess_input_info) {
2567 /* fetch tcs input values into reserved space */
2568 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2569 vtx.op = FETCH_OP_VFETCH;
2570 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2571 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2572 vtx.mega_fetch_count = 16;
2573 vtx.data_format = FMT_32_32_32_32;
2574 vtx.num_format_all = 2;
2575 vtx.format_comp_all = 1;
2576 vtx.use_const_fields = 0;
2577 vtx.endian = r600_endian_swap(32);
2578 vtx.srf_mode_all = 1;
2579 vtx.offset = 0;
2580 vtx.dst_gpr = ctx->tess_input_info;
2581 vtx.dst_sel_x = 0;
2582 vtx.dst_sel_y = 1;
2583 vtx.dst_sel_z = 2;
2584 vtx.dst_sel_w = 3;
2585 vtx.src_gpr = temp_val;
2586 vtx.src_sel_x = 0;
2587
2588 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2589 if (r)
2590 return r;
2591 }
2592
2593 /* used by TCS/TES */
2594 if (ctx->tess_output_info) {
2595 /* fetch tcs output values into reserved space */
2596 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
2597 vtx.op = FETCH_OP_VFETCH;
2598 vtx.buffer_id = R600_LDS_INFO_CONST_BUFFER;
2599 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
2600 vtx.mega_fetch_count = 16;
2601 vtx.data_format = FMT_32_32_32_32;
2602 vtx.num_format_all = 2;
2603 vtx.format_comp_all = 1;
2604 vtx.use_const_fields = 0;
2605 vtx.endian = r600_endian_swap(32);
2606 vtx.srf_mode_all = 1;
2607 vtx.offset = 16;
2608 vtx.dst_gpr = ctx->tess_output_info;
2609 vtx.dst_sel_x = 0;
2610 vtx.dst_sel_y = 1;
2611 vtx.dst_sel_z = 2;
2612 vtx.dst_sel_w = 3;
2613 vtx.src_gpr = temp_val;
2614 vtx.src_sel_x = 0;
2615
2616 r = r600_bytecode_add_vtx(ctx->bc, &vtx);
2617 if (r)
2618 return r;
2619 }
2620 return 0;
2621 }
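/* As used above, R600_LDS_INFO_CONST_BUFFER is assumed to hold two vec4s:
 * the one at byte offset 0 describes the TCS input layout (consumed by
 * VS/TCS) and the one at byte offset 16 describes the TCS output layout
 * (consumed by TCS/TES). */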
2622
2623 static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
2624 {
2625 int i, j, r;
2626 int temp_reg;
2627
2628 /* fetch tcs input values into tess_input_info */
2629 ctx->tess_input_info = r600_get_temp(ctx);
2630 ctx->tess_output_info = 0;
2631 r = r600_fetch_tess_io_info(ctx);
2632 if (r)
2633 return r;
2634
2635 temp_reg = r600_get_temp(ctx);
2636 /* dst reg contains LDS address stride * idx */
2637 /* MUL vertexID, vertex_dw_stride */
2638 r = single_alu_op2(ctx, ALU_OP2_MUL_UINT24,
2639 temp_reg, 0,
2640 ctx->tess_input_info, 1,
2641 0, 1); /* rel id in r0.y? */
2642 if (r)
2643 return r;
2644
2645 for (i = 0; i < ctx->shader->noutput; i++) {
2646 struct r600_bytecode_alu alu;
2647 int param = r600_get_lds_unique_index(ctx->shader->output[i].name, ctx->shader->output[i].sid);
2648
2649 if (param) {
2650 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2651 temp_reg, 1,
2652 temp_reg, 0,
2653 V_SQ_ALU_SRC_LITERAL, param * 16);
2654 if (r)
2655 return r;
2656 }
2657
2658 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2659 temp_reg, 2,
2660 temp_reg, param ? 1 : 0,
2661 V_SQ_ALU_SRC_LITERAL, 8);
2662 if (r)
2663 return r;
2664
2665
2666 for (j = 0; j < 2; j++) {
2667 int chan = (j == 1) ? 2 : (param ? 1 : 0);
2668 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2669 alu.op = LDS_OP3_LDS_WRITE_REL;
2670 alu.src[0].sel = temp_reg;
2671 alu.src[0].chan = chan;
2672 alu.src[1].sel = ctx->shader->output[i].gpr;
2673 alu.src[1].chan = j * 2;
2674 alu.src[2].sel = ctx->shader->output[i].gpr;
2675 alu.src[2].chan = (j * 2) + 1;
2676 alu.last = 1;
2677 alu.dst.chan = 0;
2678 alu.lds_idx = 1;
2679 alu.is_lds_idx_op = true;
2680 r = r600_bytecode_add_alu(ctx->bc, &alu);
2681 if (r)
2682 return r;
2683 }
2684 }
2685 return 0;
2686 }
2687
2688 static int r600_store_tcs_output(struct r600_shader_ctx *ctx)
2689 {
2690 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2691 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
2692 int i, r, lasti;
2693 int temp_reg = r600_get_temp(ctx);
2694 struct r600_bytecode_alu alu;
2695 unsigned write_mask = dst->Register.WriteMask;
2696
2697 if (inst->Dst[0].Register.File != TGSI_FILE_OUTPUT)
2698 return 0;
2699
2700 r = get_lds_offset0(ctx, 1, temp_reg, !dst->Register.Dimension);
2701 if (r)
2702 return r;
2703
2704 /* the base address is now in temp.x */
2705 r = r600_get_byte_address(ctx, temp_reg,
2706 &inst->Dst[0], NULL, ctx->tess_output_info, 1);
2707 if (r)
2708 return r;
2709
2710 /* LDS write */
2711 lasti = tgsi_last_instruction(write_mask);
2712 for (i = 1; i <= lasti; i++) {
2713
2714 if (!(write_mask & (1 << i)))
2715 continue;
2716 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2717 temp_reg, i,
2718 temp_reg, 0,
2719 V_SQ_ALU_SRC_LITERAL, 4 * i);
2720 if (r)
2721 return r;
2722 }
2723
2724 for (i = 0; i <= lasti; i++) {
2725 if (!(write_mask & (1 << i)))
2726 continue;
2727
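/* If both channels of a pair (xy or zw) are written, one LDS_WRITE_REL
 * stores the two consecutive dwords at once; otherwise fall back to a
 * single-dword LDS_WRITE below. */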
2728 if ((i == 0 && ((write_mask & 3) == 3)) ||
2729 (i == 2 && ((write_mask & 0xc) == 0xc))) {
2730 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2731 alu.op = LDS_OP3_LDS_WRITE_REL;
2732 alu.src[0].sel = temp_reg;
2733 alu.src[0].chan = i;
2734
2735 alu.src[1].sel = dst->Register.Index;
2736 alu.src[1].sel += ctx->file_offset[dst->Register.File];
2737 alu.src[1].chan = i;
2738
2739 alu.src[2].sel = dst->Register.Index;
2740 alu.src[2].sel += ctx->file_offset[dst->Register.File];
2741 alu.src[2].chan = i + 1;
2742 alu.lds_idx = 1;
2743 alu.dst.chan = 0;
2744 alu.last = 1;
2745 alu.is_lds_idx_op = true;
2746 r = r600_bytecode_add_alu(ctx->bc, &alu);
2747 if (r)
2748 return r;
2749 i += 1;
2750 continue;
2751 }
2752 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2753 alu.op = LDS_OP2_LDS_WRITE;
2754 alu.src[0].sel = temp_reg;
2755 alu.src[0].chan = i;
2756
2757 alu.src[1].sel = dst->Register.Index;
2758 alu.src[1].sel += ctx->file_offset[dst->Register.File];
2759 alu.src[1].chan = i;
2760
2761 alu.src[2].sel = V_SQ_ALU_SRC_0;
2762 alu.dst.chan = 0;
2763 alu.last = 1;
2764 alu.is_lds_idx_op = true;
2765 r = r600_bytecode_add_alu(ctx->bc, &alu);
2766 if (r)
2767 return r;
2768 }
2769 return 0;
2770 }
2771
2772 static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
2773 int output_idx)
2774 {
2775 int param;
2776 unsigned temp_reg = r600_get_temp(ctx);
2777 unsigned name = ctx->shader->output[output_idx].name;
2778 int dreg = ctx->shader->output[output_idx].gpr;
2779 int r;
2780
2781 param = r600_get_lds_unique_index(name, 0);
2782 r = get_lds_offset0(ctx, 1, temp_reg, true);
2783 if (r)
2784 return r;
2785
2786 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2787 temp_reg, 0,
2788 temp_reg, 0,
2789 V_SQ_ALU_SRC_LITERAL, param * 16);
2790 if (r)
2791 return r;
2792
2793 do_lds_fetch_values(ctx, temp_reg, dreg);
2794 return 0;
2795 }
2796
2797 static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
2798 {
2799 unsigned i;
2800 int stride, outer_comps, inner_comps;
2801 int tessinner_idx = -1, tessouter_idx = -1;
2802 int r;
2803 int temp_reg = r600_get_temp(ctx);
2804 int treg[3] = {-1, -1, -1};
2805 struct r600_bytecode_alu alu;
2806 struct r600_bytecode_cf *cf_jump, *cf_pop;
2807
2808 /* only execute factor emission for invocation 0 */
2809 /* PRED_SETE_INT __, R0.z, 0 */
2810 memset(&alu, 0, sizeof(alu));
2811 alu.op = ALU_OP2_PRED_SETE_INT;
2812 alu.src[0].chan = 2;
2813 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2814 alu.execute_mask = 1;
2815 alu.update_pred = 1;
2816 alu.last = 1;
2817 r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
2818
2819 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
2820 cf_jump = ctx->bc->cf_last;
2821
2822 treg[0] = r600_get_temp(ctx);
2823 switch (ctx->shader->tcs_prim_mode) {
2824 case PIPE_PRIM_LINES:
2825 stride = 8; /* 2 dwords, 1 vec2 store */
2826 outer_comps = 2;
2827 inner_comps = 0;
2828 break;
2829 case PIPE_PRIM_TRIANGLES:
2830 stride = 16; /* 4 dwords, 1 vec4 store */
2831 outer_comps = 3;
2832 inner_comps = 1;
2833 treg[1] = r600_get_temp(ctx);
2834 break;
2835 case PIPE_PRIM_QUADS:
2836 stride = 24; /* 6 dwords, 2 stores (vec4 + vec2) */
2837 outer_comps = 4;
2838 inner_comps = 2;
2839 treg[1] = r600_get_temp(ctx);
2840 treg[2] = r600_get_temp(ctx);
2841 break;
2842 default:
2843 assert(0);
2844 return -1;
2845 }
2846
2847 /* R0 is PatchID, RelPatchID, InvocationID, tf_base */
2848 /* TF_WRITE takes index in R.x, value in R.y */
2849 for (i = 0; i < ctx->shader->noutput; i++) {
2850 if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSINNER)
2851 tessinner_idx = i;
2852 if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSOUTER)
2853 tessouter_idx = i;
2854 }
2855
2856 if (tessouter_idx == -1)
2857 return -1;
2858
2859 if (tessinner_idx == -1 && inner_comps)
2860 return -1;
2861
2862 if (tessouter_idx != -1) {
2863 r = r600_tess_factor_read(ctx, tessouter_idx);
2864 if (r)
2865 return r;
2866 }
2867
2868 if (tessinner_idx != -1) {
2869 r = r600_tess_factor_read(ctx, tessinner_idx);
2870 if (r)
2871 return r;
2872 }
2873
2874 /* t.x = tf_base(r0.w) + relpatchid(r0.y) * tf_stride, */
2875 /* computed as a single MULADD: */
2876
2877 /* multiply incoming r0.y by the stride: t.x = r0.y * stride */
2878 /* and add incoming r0.w to it: t.x = t.x + r0.w */
2879 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2880 temp_reg, 0,
2881 0, 1,
2882 V_SQ_ALU_SRC_LITERAL, stride,
2883 0, 3);
2884 if (r)
2885 return r;
2886
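/* Each tess factor component is emitted as an (address, value) pair for
 * TF_WRITE: component i goes to byte offset 4 * i from t.x, e.g. for
 * triangles outer[0..2] land at +0, +4, +8 and inner[0] at +12. */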
2887 for (i = 0; i < outer_comps + inner_comps; i++) {
2888 int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
2889 int out_comp = i >= outer_comps ? i - outer_comps : i;
2890
2891 if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
2892 if (out_comp == 1)
2893 out_comp = 0;
2894 else if (out_comp == 0)
2895 out_comp = 1;
2896 }
2897
2898 r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
2899 treg[i / 2], (2 * (i % 2)),
2900 temp_reg, 0,
2901 V_SQ_ALU_SRC_LITERAL, 4 * i);
2902 if (r)
2903 return r;
2904 r = single_alu_op2(ctx, ALU_OP1_MOV,
2905 treg[i / 2], 1 + (2 * (i%2)),
2906 ctx->shader->output[out_idx].gpr, out_comp,
2907 0, 0);
2908 if (r)
2909 return r;
2910 }
2911 for (i = 0; i < outer_comps + inner_comps; i++) {
2912 struct r600_bytecode_gds gds;
2913
2914 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
2915 gds.src_gpr = treg[i / 2];
2916 gds.src_sel_x = 2 * (i % 2);
2917 gds.src_sel_y = 1 + (2 * (i % 2));
2918 gds.src_sel_z = 4;
2919 gds.dst_sel_x = 7;
2920 gds.dst_sel_y = 7;
2921 gds.dst_sel_z = 7;
2922 gds.dst_sel_w = 7;
2923 gds.op = FETCH_OP_TF_WRITE;
2924 r = r600_bytecode_add_gds(ctx->bc, &gds);
2925 if (r)
2926 return r;
2927 }
2928
2929 // Patch up jump label
2930 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
2931 cf_pop = ctx->bc->cf_last;
2932
2933 cf_jump->cf_addr = cf_pop->id + 2;
2934 cf_jump->pop_count = 1;
2935 cf_pop->cf_addr = cf_pop->id + 2;
2936 cf_pop->pop_count = 1;
2937
2938 return 0;
2939 }
2940
2941 /*
2942 * We have to work out the thread ID for load and atomic
2943 * operations, which store the returned value to an index
2944 * in an intermediate buffer.
2945 * The index is derived from the thread id, which is
2946 * computed with the MBCNT instructions.
2947 * The shader engine ID is multiplied by 256
2948 * and the wave id is added;
2949 * the result is then multiplied by 64 and the thread id
2950 * added.
2951 */
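/* A minimal sketch (illustrative, not compiled; thread_index is a
 * hypothetical helper) of the index computed by load_thread_id_gpr()
 * below, assuming 64-lane waves and the 256-waves-per-shader-engine
 * factor from the comment above:
 */
#if 0
static unsigned thread_index(unsigned se_id, unsigned wave_id, unsigned lane)
{
	/* e.g. SE 1, wave 3, lane 17 -> (1 * 256 + 3) * 64 + 17 = 16593 */
	return (se_id * 256 + wave_id) * 64 + lane;
}
#endif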
2952 static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
2953 {
2954 struct r600_bytecode_alu alu;
2955 int r;
2956
2957 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2958 alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT;
2959 alu.dst.sel = ctx->temp_reg;
2960 alu.dst.chan = 0;
2961 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
2962 alu.src[0].value = 0xffffffff;
2963 alu.dst.write = 1;
2964 r = r600_bytecode_add_alu(ctx->bc, &alu);
2965 if (r)
2966 return r;
2967
2968 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2969 alu.op = ALU_OP1_MBCNT_32HI_INT;
2970 alu.dst.sel = ctx->temp_reg;
2971 alu.dst.chan = 1;
2972 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
2973 alu.src[0].value = 0xffffffff;
2974 alu.dst.write = 1;
2975 r = r600_bytecode_add_alu(ctx->bc, &alu);
2976 if (r)
2977 return r;
2978
2979 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2980 alu.op = ALU_OP3_MULADD_UINT24;
2981 alu.dst.sel = ctx->temp_reg;
2982 alu.dst.chan = 2;
2983 alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID;
2984 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2985 alu.src[1].value = 256;
2986 alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID;
2987 alu.dst.write = 1;
2988 alu.is_op3 = 1;
2989 alu.last = 1;
2990 r = r600_bytecode_add_alu(ctx->bc, &alu);
2991 if (r)
2992 return r;
2993
2994 r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
2995 ctx->thread_id_gpr, 1,
2996 ctx->temp_reg, 2,
2997 V_SQ_ALU_SRC_LITERAL, 0x40,
2998 ctx->temp_reg, 0);
2999 if (r)
3000 return r;
3001 return 0;
3002 }
3003
3004 static int r600_shader_from_tgsi(struct r600_context *rctx,
3005 struct r600_pipe_shader *pipeshader,
3006 union r600_shader_key key)
3007 {
3008 struct r600_screen *rscreen = rctx->screen;
3009 struct r600_shader *shader = &pipeshader->shader;
3010 struct tgsi_token *tokens = pipeshader->selector->tokens;
3011 struct pipe_stream_output_info so = pipeshader->selector->so;
3012 struct tgsi_full_immediate *immediate;
3013 struct r600_shader_ctx ctx;
3014 struct r600_bytecode_output output[ARRAY_SIZE(shader->output)];
3015 unsigned output_done, noutput;
3016 unsigned opcode;
3017 int i, j, k, r = 0;
3018 int next_param_base = 0, next_clip_base;
3019 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
3020 bool indirect_gprs;
3021 bool ring_outputs = false;
3022 bool lds_outputs = false;
3023 bool lds_inputs = false;
3024 bool pos_emitted = false;
3025
3026 ctx.bc = &shader->bc;
3027 ctx.shader = shader;
3028 ctx.native_integers = true;
3029
3030 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
3031 rscreen->has_compressed_msaa_texturing);
3032 ctx.tokens = tokens;
3033 tgsi_scan_shader(tokens, &ctx.info);
3034 shader->indirect_files = ctx.info.indirect_files;
3035
3036 shader->uses_doubles = ctx.info.uses_doubles;
3037 shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC];
3038 shader->nsys_inputs = 0;
3039
3040 shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0;
3041 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
3042 tgsi_parse_init(&ctx.parse, tokens);
3043 ctx.type = ctx.info.processor;
3044 shader->processor_type = ctx.type;
3045 ctx.bc->type = shader->processor_type;
3046
3047 switch (ctx.type) {
3048 case PIPE_SHADER_VERTEX:
3049 shader->vs_as_gs_a = key.vs.as_gs_a;
3050 shader->vs_as_es = key.vs.as_es;
3051 shader->vs_as_ls = key.vs.as_ls;
3052 shader->atomic_base = key.vs.first_atomic_counter;
3053 if (shader->vs_as_es)
3054 ring_outputs = true;
3055 if (shader->vs_as_ls)
3056 lds_outputs = true;
3057 break;
3058 case PIPE_SHADER_GEOMETRY:
3059 ring_outputs = true;
3060 shader->atomic_base = key.gs.first_atomic_counter;
3061 shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix;
3062 break;
3063 case PIPE_SHADER_TESS_CTRL:
3064 shader->tcs_prim_mode = key.tcs.prim_mode;
3065 shader->atomic_base = key.tcs.first_atomic_counter;
3066 lds_outputs = true;
3067 lds_inputs = true;
3068 break;
3069 case PIPE_SHADER_TESS_EVAL:
3070 shader->tes_as_es = key.tes.as_es;
3071 shader->atomic_base = key.tes.first_atomic_counter;
3072 lds_inputs = true;
3073 if (shader->tes_as_es)
3074 ring_outputs = true;
3075 break;
3076 case PIPE_SHADER_FRAGMENT:
3077 shader->two_side = key.ps.color_two_side;
3078 shader->atomic_base = key.ps.first_atomic_counter;
3079 shader->rat_base = key.ps.nr_cbufs;
3080 shader->image_size_const_offset = key.ps.image_size_const_offset;
3081 break;
3082 default:
3083 break;
3084 }
3085
3086 if (shader->vs_as_es || shader->tes_as_es) {
3087 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
3088 } else {
3089 ctx.gs_for_vs = NULL;
3090 }
3091
3092 ctx.next_ring_offset = 0;
3093 ctx.gs_out_ring_offset = 0;
3094 ctx.gs_next_vertex = 0;
3095 ctx.gs_stream_output_info = &so;
3096
3097 ctx.face_gpr = -1;
3098 ctx.fixed_pt_position_gpr = -1;
3099 ctx.fragcoord_input = -1;
3100 ctx.colors_used = 0;
3101 ctx.clip_vertex_write = 0;
3102
3103 shader->nr_ps_color_exports = 0;
3104 shader->nr_ps_max_color_exports = 0;
3105
3106
3107 /* register allocations */
3108 /* Values [0,127] correspond to GPR[0..127].
3109 * Values [128,159] correspond to constant buffer bank 0
3110 * Values [160,191] correspond to constant buffer bank 1
3111 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
3112 * Values [256,287] correspond to constant buffer bank 2 (EG)
3113 * Values [288,319] correspond to constant buffer bank 3 (EG)
3114 * Other special values are shown in the list below.
3115 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
3116 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
3117 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
3118 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
3119 * 248 SQ_ALU_SRC_0: special constant 0.0.
3120 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
3121 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
3122 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
3123 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
3124 * 253 SQ_ALU_SRC_LITERAL: literal constant.
3125 * 254 SQ_ALU_SRC_PV: previous vector result.
3126 * 255 SQ_ALU_SRC_PS: previous scalar result.
3127 */
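/* For example (EG, illustrative): a source sel of 300 = 288 + 12 reads
 * constant 12 from constant buffer bank 3, while sel 130 reads constant 2
 * from bank 0. */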
3128 for (i = 0; i < TGSI_FILE_COUNT; i++) {
3129 ctx.file_offset[i] = 0;
3130 }
3131
3132 if (ctx.type == PIPE_SHADER_VERTEX) {
3133
3134 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3135 if (ctx.info.num_inputs)
3136 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
3137 }
3138 if (ctx.type == PIPE_SHADER_FRAGMENT) {
3139 if (ctx.bc->chip_class >= EVERGREEN)
3140 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
3141 else
3142 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
3143 }
3144 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3145 /* FIXME 1 would be enough in some cases (3 or less input vertices) */
3146 ctx.file_offset[TGSI_FILE_INPUT] = 2;
3147 }
3148 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3149 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3150 if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3151 bool add_tesscoord = false, add_tess_inout = false;
3152 ctx.file_offset[TGSI_FILE_INPUT] = 1;
3153 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
3154 /* if we have tesscoord save one reg */
3155 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSCOORD)
3156 add_tesscoord = true;
3157 if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSINNER ||
3158 ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_TESSOUTER)
3159 add_tess_inout = true;
3160 }
3161 if (add_tesscoord || add_tess_inout)
3162 ctx.file_offset[TGSI_FILE_INPUT]++;
3163 if (add_tess_inout)
3164 ctx.file_offset[TGSI_FILE_INPUT] += 2;
3165 }
3166
3167 ctx.file_offset[TGSI_FILE_OUTPUT] =
3168 ctx.file_offset[TGSI_FILE_INPUT] +
3169 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3170 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
3171 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
3172
3173 /* Outside the GPR range. This will be translated to one of the
3174 * kcache banks later. */
3175 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
3176
3177 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
3178 ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
3179 ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
3180 ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
3181 ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
3182
3183 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3184 ctx.tess_input_info = ctx.bc->ar_reg + 3;
3185 ctx.tess_output_info = ctx.bc->ar_reg + 4;
3186 ctx.temp_reg = ctx.bc->ar_reg + 5;
3187 } else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
3188 ctx.tess_input_info = 0;
3189 ctx.tess_output_info = ctx.bc->ar_reg + 3;
3190 ctx.temp_reg = ctx.bc->ar_reg + 4;
3191 } else if (ctx.type == PIPE_SHADER_GEOMETRY) {
3192 ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
3193 ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
3194 ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
3195 ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
3196 ctx.temp_reg = ctx.bc->ar_reg + 7;
3197 if (ctx.shader->gs_tri_strip_adj_fix) {
3198 ctx.gs_rotated_input[0] = ctx.bc->ar_reg + 7;
3199 ctx.gs_rotated_input[1] = ctx.bc->ar_reg + 8;
3200 ctx.temp_reg += 2;
3201 } else {
3202 ctx.gs_rotated_input[0] = 0;
3203 ctx.gs_rotated_input[1] = 1;
3204 }
3205 } else {
3206 ctx.temp_reg = ctx.bc->ar_reg + 3;
3207 }
3208
3209 if (shader->uses_images && ctx.type == PIPE_SHADER_FRAGMENT) {
3210 ctx.thread_id_gpr = ctx.temp_reg;
3211 ctx.temp_reg++;
3212 } else
3213 ctx.thread_id_gpr = 0;
3214
3215 shader->max_arrays = 0;
3216 shader->num_arrays = 0;
3217 if (indirect_gprs) {
3218
3219 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
3220 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
3221 ctx.file_offset[TGSI_FILE_OUTPUT] -
3222 ctx.file_offset[TGSI_FILE_INPUT],
3223 0x0F);
3224 }
3225 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
3226 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
3227 ctx.file_offset[TGSI_FILE_TEMPORARY] -
3228 ctx.file_offset[TGSI_FILE_OUTPUT],
3229 0x0F);
3230 }
3231 }
3232
3233 ctx.nliterals = 0;
3234 ctx.literals = NULL;
3235
3236 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
3237 ctx.info.colors_written == 1;
3238 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
3239 shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
3240
3241 if (ctx.type == PIPE_SHADER_VERTEX ||
3242 ctx.type == PIPE_SHADER_GEOMETRY ||
3243 ctx.type == PIPE_SHADER_TESS_EVAL) {
3244 shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] +
3245 ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1;
3246 shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1;
3247 shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED];
3248 }
3249
3250 if (shader->vs_as_gs_a)
3251 vs_add_primid_output(&ctx, key.vs.prim_id_out);
3252
3253 if (ctx.type == PIPE_SHADER_TESS_EVAL)
3254 r600_fetch_tess_io_info(&ctx);
3255
3256 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3257 tgsi_parse_token(&ctx.parse);
3258 switch (ctx.parse.FullToken.Token.Type) {
3259 case TGSI_TOKEN_TYPE_IMMEDIATE:
3260 immediate = &ctx.parse.FullToken.FullImmediate;
3261 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
3262 if(ctx.literals == NULL) {
3263 r = -ENOMEM;
3264 goto out_err;
3265 }
3266 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
3267 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
3268 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
3269 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
3270 ctx.nliterals++;
3271 break;
3272 case TGSI_TOKEN_TYPE_DECLARATION:
3273 r = tgsi_declaration(&ctx);
3274 if (r)
3275 goto out_err;
3276 break;
3277 case TGSI_TOKEN_TYPE_INSTRUCTION:
3278 case TGSI_TOKEN_TYPE_PROPERTY:
3279 break;
3280 default:
3281 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
3282 r = -EINVAL;
3283 goto out_err;
3284 }
3285 }
3286
3287 shader->ring_item_sizes[0] = ctx.next_ring_offset;
3288 shader->ring_item_sizes[1] = 0;
3289 shader->ring_item_sizes[2] = 0;
3290 shader->ring_item_sizes[3] = 0;
3291
3292 /* Process two side if needed */
3293 if (shader->two_side && ctx.colors_used) {
3294 int i, count = ctx.shader->ninput;
3295 unsigned next_lds_loc = ctx.shader->nlds;
3296
3297 /* Additional inputs are allocated right after the existing inputs.
3298 * They are not needed once color selection is done, so there is no
3299 * need to reserve these GPRs for the rest of the shader code or to
3300 * adjust output offsets etc. */
3301 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
3302 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
3303
3304 /* if two-sided and neither the face nor the sample mask is used by the shader, ensure a face_gpr is allocated */
3305 if (ctx.face_gpr == -1) {
3306 i = ctx.shader->ninput++;
3307 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
3308 ctx.shader->input[i].spi_sid = 0;
3309 ctx.shader->input[i].gpr = gpr++;
3310 ctx.face_gpr = ctx.shader->input[i].gpr;
3311 }
3312
3313 for (i = 0; i < count; i++) {
3314 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
3315 int ni = ctx.shader->ninput++;
3316 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
3317 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
3318 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
3319 ctx.shader->input[ni].gpr = gpr++;
3320 // TGSI to LLVM needs to know the lds position of inputs.
3321 // The non-LLVM path computes it later (in process_twoside_color_inputs)
3322 ctx.shader->input[ni].lds_pos = next_lds_loc++;
3323 ctx.shader->input[i].back_color_input = ni;
3324 if (ctx.bc->chip_class >= EVERGREEN) {
3325 if ((r = evergreen_interp_input(&ctx, ni)))
3326 return r;
3327 }
3328 }
3329 }
3330 }
3331
3332 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
3333 shader->nr_ps_max_color_exports = 8;
3334
3335 if (ctx.fragcoord_input >= 0) {
3336 if (ctx.bc->chip_class == CAYMAN) {
3337 for (j = 0 ; j < 4; j++) {
3338 struct r600_bytecode_alu alu;
3339 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3340 alu.op = ALU_OP1_RECIP_IEEE;
3341 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3342 alu.src[0].chan = 3;
3343
3344 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3345 alu.dst.chan = j;
3346 alu.dst.write = (j == 3);
3347 alu.last = 1;
3348 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3349 return r;
3350 }
3351 } else {
3352 struct r600_bytecode_alu alu;
3353 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3354 alu.op = ALU_OP1_RECIP_IEEE;
3355 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
3356 alu.src[0].chan = 3;
3357
3358 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
3359 alu.dst.chan = 3;
3360 alu.dst.write = 1;
3361 alu.last = 1;
3362 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
3363 return r;
3364 }
3365 }
3366
3367 if (ctx.thread_id_gpr) {
3368 load_thread_id_gpr(&ctx);
3369 }
3370
3371 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3372 struct r600_bytecode_alu alu;
3373 int r;
3374
3375 /* workaround for GS threads with no output - emit a cut at the start of the GS */
3376 if (ctx.bc->chip_class == R600)
3377 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CUT_VERTEX);
3378
3379 for (j = 0; j < 4; j++) {
3380 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3381 alu.op = ALU_OP1_MOV;
3382 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3383 alu.src[0].value = 0;
3384 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
3385 alu.dst.write = 1;
3386 alu.last = 1;
3387 r = r600_bytecode_add_alu(ctx.bc, &alu);
3388 if (r)
3389 return r;
3390 }
3391
3392 if (ctx.shader->gs_tri_strip_adj_fix) {
3393 r = single_alu_op2(&ctx, ALU_OP2_AND_INT,
3394 ctx.gs_rotated_input[0], 2,
3395 0, 2,
3396 V_SQ_ALU_SRC_LITERAL, 1);
3397 if (r)
3398 return r;
3399
3400 for (i = 0; i < 6; i++) {
3401 int rotated = (i + 4) % 6;
3402 int offset_reg = i / 3;
3403 int offset_chan = i % 3;
3404 int rotated_offset_reg = rotated / 3;
3405 int rotated_offset_chan = rotated % 3;
3406
3407 if (offset_reg == 0 && offset_chan == 2)
3408 offset_chan = 3;
3409 if (rotated_offset_reg == 0 && rotated_offset_chan == 2)
3410 rotated_offset_chan = 3;
3411
3412 r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT,
3413 ctx.gs_rotated_input[offset_reg], offset_chan,
3414 ctx.gs_rotated_input[0], 2,
3415 offset_reg, offset_chan,
3416 rotated_offset_reg, rotated_offset_chan);
3417 if (r)
3418 return r;
3419 }
3420 }
3421 }
3422
3423 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3424 r600_fetch_tess_io_info(&ctx);
3425
3426 if (shader->two_side && ctx.colors_used) {
3427 if ((r = process_twoside_color_inputs(&ctx)))
3428 return r;
3429 }
3430
3431 tgsi_parse_init(&ctx.parse, tokens);
3432 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
3433 tgsi_parse_token(&ctx.parse);
3434 switch (ctx.parse.FullToken.Token.Type) {
3435 case TGSI_TOKEN_TYPE_INSTRUCTION:
3436 r = tgsi_is_supported(&ctx);
3437 if (r)
3438 goto out_err;
3439 ctx.max_driver_temp_used = 0;
3440 /* reserve first tmp for everyone */
3441 r600_get_temp(&ctx);
3442
3443 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
3444 if ((r = tgsi_split_constant(&ctx)))
3445 goto out_err;
3446 if ((r = tgsi_split_literal_constant(&ctx)))
3447 goto out_err;
3448 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3449 if ((r = tgsi_split_gs_inputs(&ctx)))
3450 goto out_err;
3451 } else if (lds_inputs) {
3452 if ((r = tgsi_split_lds_inputs(&ctx)))
3453 goto out_err;
3454 }
3455 if (ctx.bc->chip_class == CAYMAN)
3456 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
3457 else if (ctx.bc->chip_class >= EVERGREEN)
3458 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
3459 else
3460 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
3461 r = ctx.inst_info->process(&ctx);
3462 if (r)
3463 goto out_err;
3464
3465 if (ctx.type == PIPE_SHADER_TESS_CTRL) {
3466 r = r600_store_tcs_output(&ctx);
3467 if (r)
3468 goto out_err;
3469 }
3470 break;
3471 default:
3472 break;
3473 }
3474 }
3475
3476 /* Reset the temporary register counter. */
3477 ctx.max_driver_temp_used = 0;
3478
3479 noutput = shader->noutput;
3480
3481 if (!ring_outputs && ctx.clip_vertex_write) {
3482 unsigned clipdist_temp[2];
3483
3484 clipdist_temp[0] = r600_get_temp(&ctx);
3485 clipdist_temp[1] = r600_get_temp(&ctx);
3486
3487 /* convert the clipvertex write into clipdistance writes; the clip
3488 vertex itself is not exported anymore */
3489
3490 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
3491 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3492 shader->output[noutput].gpr = clipdist_temp[0];
3493 noutput++;
3494 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
3495 shader->output[noutput].gpr = clipdist_temp[1];
3496 noutput++;
3497
3498 /* reset spi_sid for clipvertex output to avoid confusing spi */
3499 shader->output[ctx.cv_output].spi_sid = 0;
3500
3501 shader->clip_dist_write = 0xFF;
3502 shader->cc_dist_mask = 0xFF;
3503
3504 for (i = 0; i < 8; i++) {
3505 int oreg = i >> 2;
3506 int ochan = i & 3;
3507
3508 for (j = 0; j < 4; j++) {
3509 struct r600_bytecode_alu alu;
3510 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3511 alu.op = ALU_OP2_DOT4;
3512 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
3513 alu.src[0].chan = j;
3514
3515 alu.src[1].sel = 512 + i;
3516 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
3517 alu.src[1].chan = j;
3518
3519 alu.dst.sel = clipdist_temp[oreg];
3520 alu.dst.chan = j;
3521 alu.dst.write = (j == ochan);
3522 if (j == 3)
3523 alu.last = 1;
3524 r = r600_bytecode_add_alu(ctx.bc, &alu);
3525 if (r)
3526 return r;
3527 }
3528 }
3529 }
3530
3531 /* Add stream outputs. */
3532 if (so.num_outputs) {
3533 bool emit = false;
3534 if (!lds_outputs && !ring_outputs && ctx.type == PIPE_SHADER_VERTEX)
3535 emit = true;
3536 if (!ring_outputs && ctx.type == PIPE_SHADER_TESS_EVAL)
3537 emit = true;
3538 if (emit)
3539 emit_streamout(&ctx, &so, -1, NULL);
3540 }
3541 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
3542 convert_edgeflag_to_int(&ctx);
3543
3544 if (ctx.type == PIPE_SHADER_TESS_CTRL)
3545 r600_emit_tess_factor(&ctx);
3546
3547 if (lds_outputs) {
3548 if (ctx.type == PIPE_SHADER_VERTEX) {
3549 if (ctx.shader->noutput)
3550 emit_lds_vs_writes(&ctx);
3551 }
3552 } else if (ring_outputs) {
3553 if (shader->vs_as_es || shader->tes_as_es) {
3554 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
3555 ctx.gs_export_gpr_tregs[1] = -1;
3556 ctx.gs_export_gpr_tregs[2] = -1;
3557 ctx.gs_export_gpr_tregs[3] = -1;
3558
3559 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
3560 }
3561 } else {
3562 /* Export output */
3563 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
3564
3565 for (i = 0, j = 0; i < noutput; i++, j++) {
3566 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3567 output[j].gpr = shader->output[i].gpr;
3568 output[j].elem_size = 3;
3569 output[j].swizzle_x = 0;
3570 output[j].swizzle_y = 1;
3571 output[j].swizzle_z = 2;
3572 output[j].swizzle_w = 3;
3573 output[j].burst_count = 1;
3574 output[j].type = -1;
3575 output[j].op = CF_OP_EXPORT;
3576 switch (ctx.type) {
3577 case PIPE_SHADER_VERTEX:
3578 case PIPE_SHADER_TESS_EVAL:
3579 switch (shader->output[i].name) {
3580 case TGSI_SEMANTIC_POSITION:
3581 output[j].array_base = 60;
3582 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3583 pos_emitted = true;
3584 break;
3585
3586 case TGSI_SEMANTIC_PSIZE:
3587 output[j].array_base = 61;
3588 output[j].swizzle_y = 7;
3589 output[j].swizzle_z = 7;
3590 output[j].swizzle_w = 7;
3591 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3592 pos_emitted = true;
3593 break;
3594 case TGSI_SEMANTIC_EDGEFLAG:
3595 output[j].array_base = 61;
3596 output[j].swizzle_x = 7;
3597 output[j].swizzle_y = 0;
3598 output[j].swizzle_z = 7;
3599 output[j].swizzle_w = 7;
3600 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3601 pos_emitted = true;
3602 break;
3603 case TGSI_SEMANTIC_LAYER:
3604 /* spi_sid is 0 for outputs that are
3605 * not consumed by PS */
3606 if (shader->output[i].spi_sid) {
3607 output[j].array_base = next_param_base++;
3608 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3609 j++;
3610 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3611 }
3612 output[j].array_base = 61;
3613 output[j].swizzle_x = 7;
3614 output[j].swizzle_y = 7;
3615 output[j].swizzle_z = 0;
3616 output[j].swizzle_w = 7;
3617 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3618 pos_emitted = true;
3619 break;
3620 case TGSI_SEMANTIC_VIEWPORT_INDEX:
3621 /* spi_sid is 0 for outputs that are
3622 * not consumed by PS */
3623 if (shader->output[i].spi_sid) {
3624 output[j].array_base = next_param_base++;
3625 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3626 j++;
3627 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3628 }
3629 output[j].array_base = 61;
3630 output[j].swizzle_x = 7;
3631 output[j].swizzle_y = 7;
3632 output[j].swizzle_z = 7;
3633 output[j].swizzle_w = 0;
3634 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3635 pos_emitted = true;
3636 break;
3637 case TGSI_SEMANTIC_CLIPVERTEX:
3638 j--;
3639 break;
3640 case TGSI_SEMANTIC_CLIPDIST:
3641 output[j].array_base = next_clip_base++;
3642 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3643 pos_emitted = true;
3644 /* spi_sid is 0 for clipdistance outputs that were generated
3645 * for clipvertex - we don't need to pass them to PS */
3646 if (shader->output[i].spi_sid) {
3647 j++;
3648 /* duplicate it as PARAM to pass to the pixel shader */
3649 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
3650 output[j].array_base = next_param_base++;
3651 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3652 }
3653 break;
3654 case TGSI_SEMANTIC_FOG:
3655 output[j].swizzle_y = 4; /* 0 */
3656 output[j].swizzle_z = 4; /* 0 */
3657 output[j].swizzle_w = 5; /* 1 */
3658 break;
3659 case TGSI_SEMANTIC_PRIMID:
3660 output[j].swizzle_x = 2;
3661 output[j].swizzle_y = 4; /* 0 */
3662 output[j].swizzle_z = 4; /* 0 */
3663 output[j].swizzle_w = 4; /* 0 */
3664 break;
3665 }
3666
3667 break;
3668 case PIPE_SHADER_FRAGMENT:
3669 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
3670 /* never export more colors than the number of CBs */
3671 if (shader->output[i].sid >= max_color_exports) {
3672 /* skip export */
3673 j--;
3674 continue;
3675 }
3676 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
3677 output[j].array_base = shader->output[i].sid;
3678 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3679 shader->nr_ps_color_exports++;
3680 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
3681 for (k = 1; k < max_color_exports; k++) {
3682 j++;
3683 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3684 output[j].gpr = shader->output[i].gpr;
3685 output[j].elem_size = 3;
3686 output[j].swizzle_x = 0;
3687 output[j].swizzle_y = 1;
3688 output[j].swizzle_z = 2;
3689 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
3690 output[j].burst_count = 1;
3691 output[j].array_base = k;
3692 output[j].op = CF_OP_EXPORT;
3693 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3694 shader->nr_ps_color_exports++;
3695 }
3696 }
3697 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
3698 output[j].array_base = 61;
3699 output[j].swizzle_x = 2;
3700 output[j].swizzle_y = 7;
3701 output[j].swizzle_z = output[j].swizzle_w = 7;
3702 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3703 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
3704 output[j].array_base = 61;
3705 output[j].swizzle_x = 7;
3706 output[j].swizzle_y = 1;
3707 output[j].swizzle_z = output[j].swizzle_w = 7;
3708 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3709 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
3710 output[j].array_base = 61;
3711 output[j].swizzle_x = 7;
3712 output[j].swizzle_y = 7;
3713 output[j].swizzle_z = 0;
3714 output[j].swizzle_w = 7;
3715 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3716 } else {
3717 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
3718 r = -EINVAL;
3719 goto out_err;
3720 }
3721 break;
3722 case PIPE_SHADER_TESS_CTRL:
3723 break;
3724 default:
3725 R600_ERR("unsupported processor type %d\n", ctx.type);
3726 r = -EINVAL;
3727 goto out_err;
3728 }
3729
3730 				if (output[j].type == -1) {
3731 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3732 output[j].array_base = next_param_base++;
3733 }
3734 }
3735
3736 /* add fake position export */
3737 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && pos_emitted == false) {
3738 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3739 output[j].gpr = 0;
3740 output[j].elem_size = 3;
3741 output[j].swizzle_x = 7;
3742 output[j].swizzle_y = 7;
3743 output[j].swizzle_z = 7;
3744 output[j].swizzle_w = 7;
3745 output[j].burst_count = 1;
3746 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
3747 output[j].array_base = 60;
3748 output[j].op = CF_OP_EXPORT;
3749 j++;
3750 }
3751
3752 		/* add fake param output for vertex/TES shaders if no param is exported */
3753 if ((ctx.type == PIPE_SHADER_VERTEX || ctx.type == PIPE_SHADER_TESS_EVAL) && next_param_base == 0) {
3754 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3755 output[j].gpr = 0;
3756 output[j].elem_size = 3;
3757 output[j].swizzle_x = 7;
3758 output[j].swizzle_y = 7;
3759 output[j].swizzle_z = 7;
3760 output[j].swizzle_w = 7;
3761 output[j].burst_count = 1;
3762 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
3763 output[j].array_base = 0;
3764 output[j].op = CF_OP_EXPORT;
3765 j++;
3766 }
3767
3768 /* add fake pixel export */
3769 if (ctx.type == PIPE_SHADER_FRAGMENT && shader->nr_ps_color_exports == 0) {
3770 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
3771 output[j].gpr = 0;
3772 output[j].elem_size = 3;
3773 output[j].swizzle_x = 7;
3774 output[j].swizzle_y = 7;
3775 output[j].swizzle_z = 7;
3776 output[j].swizzle_w = 7;
3777 output[j].burst_count = 1;
3778 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
3779 output[j].array_base = 0;
3780 output[j].op = CF_OP_EXPORT;
3781 j++;
3782 shader->nr_ps_color_exports++;
3783 }
3784
3785 noutput = j;
3786
3787 /* set export done on last export of each type */
3788 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
3789 if (!(output_done & (1 << output[i].type))) {
3790 output_done |= (1 << output[i].type);
3791 output[i].op = CF_OP_EXPORT_DONE;
3792 }
3793 }
3794 /* add output to bytecode */
3795 for (i = 0; i < noutput; i++) {
3796 r = r600_bytecode_add_output(ctx.bc, &output[i]);
3797 if (r)
3798 goto out_err;
3799 }
3800 }
3801
3802 /* add program end */
3803 if (ctx.bc->chip_class == CAYMAN)
3804 cm_bytecode_add_cf_end(ctx.bc);
3805 else {
3806 const struct cf_op_info *last = NULL;
3807
3808 if (ctx.bc->cf_last)
3809 last = r600_isa_cf(ctx.bc->cf_last->op);
3810
3811 		/* ALU clause instructions don't have an EOP bit, so add a NOP */
3812 if (!last || last->flags & CF_ALU)
3813 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
3814
3815 ctx.bc->cf_last->end_of_program = 1;
3816 }
3817
3818 /* check GPR limit - we have 124 = 128 - 4
3819 * (4 are reserved as alu clause temporary registers) */
3820 if (ctx.bc->ngpr > 124) {
3821 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
3822 r = -ENOMEM;
3823 goto out_err;
3824 }
3825
3826 if (ctx.type == PIPE_SHADER_GEOMETRY) {
3827 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
3828 return r;
3829 }
3830
3831 free(ctx.literals);
3832 tgsi_parse_free(&ctx.parse);
3833 return 0;
3834 out_err:
3835 free(ctx.literals);
3836 tgsi_parse_free(&ctx.parse);
3837 return r;
3838 }
3839
3840 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
3841 {
3842 const unsigned tgsi_opcode =
3843 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
3844 R600_ERR("%s tgsi opcode unsupported\n",
3845 tgsi_get_opcode_name(tgsi_opcode));
3846 return -EINVAL;
3847 }
3848
3849 static int tgsi_end(struct r600_shader_ctx *ctx)
3850 {
3851 return 0;
3852 }
3853
3854 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
3855 const struct r600_shader_src *shader_src,
3856 unsigned chan)
3857 {
3858 bc_src->sel = shader_src->sel;
3859 bc_src->chan = shader_src->swizzle[chan];
3860 bc_src->neg = shader_src->neg;
3861 bc_src->abs = shader_src->abs;
3862 bc_src->rel = shader_src->rel;
3863 bc_src->value = shader_src->value[bc_src->chan];
3864 bc_src->kc_bank = shader_src->kc_bank;
3865 bc_src->kc_rel = shader_src->kc_rel;
3866 }
3867
3868 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
3869 {
3870 bc_src->abs = 1;
3871 bc_src->neg = 0;
3872 }
3873
3874 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
3875 {
3876 bc_src->neg = !bc_src->neg;
3877 }
3878
3879 static void tgsi_dst(struct r600_shader_ctx *ctx,
3880 const struct tgsi_full_dst_register *tgsi_dst,
3881 unsigned swizzle,
3882 struct r600_bytecode_alu_dst *r600_dst)
3883 {
3884 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3885
3886 r600_dst->sel = tgsi_dst->Register.Index;
3887 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
3888 r600_dst->chan = swizzle;
3889 r600_dst->write = 1;
3890 if (inst->Instruction.Saturate) {
3891 r600_dst->clamp = 1;
3892 }
3893 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
3894 if (tgsi_dst->Register.File == TGSI_FILE_OUTPUT) {
3895 return;
3896 }
3897 }
3898 if (tgsi_dst->Register.Indirect)
3899 r600_dst->rel = V_SQ_REL_RELATIVE;
3900
3901 }
3902
3903 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap)
3904 {
3905 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3906 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3907 struct r600_bytecode_alu alu;
3908 int i, j, r, lasti = tgsi_last_instruction(write_mask);
3909 int use_tmp = 0;
3910
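	/* A double occupies a channel pair, so a single-channel writemask is
	 * widened to the whole pair (x/y -> xy, z/w -> zw). When the requested
	 * channel is the high half (y or w), the result goes to temp_reg first
	 * and is moved to the real destination below; use_tmp - 1 names the
	 * temp channel that holds the value. */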
3911 if (singledest) {
3912 switch (write_mask) {
3913 case 0x1:
3914 write_mask = 0x3;
3915 break;
3916 case 0x2:
3917 use_tmp = 1;
3918 write_mask = 0x3;
3919 break;
3920 case 0x4:
3921 write_mask = 0xc;
3922 break;
3923 case 0x8:
3924 write_mask = 0xc;
3925 use_tmp = 3;
3926 break;
3927 }
3928 }
3929
3930 lasti = tgsi_last_instruction(write_mask);
3931 for (i = 0; i <= lasti; i++) {
3932
3933 if (!(write_mask & (1 << i)))
3934 continue;
3935
3936 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3937
3938 if (singledest) {
3939 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3940 if (use_tmp) {
3941 alu.dst.sel = ctx->temp_reg;
3942 alu.dst.chan = i;
3943 alu.dst.write = 1;
3944 }
3945 if (i == 1 || i == 3)
3946 alu.dst.write = 0;
3947 } else
3948 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3949
3950 alu.op = ctx->inst_info->op;
3951 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
3952 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3953 } else if (!swap) {
3954 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3955 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
3956 }
3957 } else {
3958 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
3959 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
3960 }
3961
3962 		/* special case: on the high dword of each pair (channels 1/3),
		 * DABS sets the abs modifier to clear the fp64 sign bit */
3963 if (i == 1 || i == 3) {
3964 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
3965 case TGSI_OPCODE_DABS:
3966 r600_bytecode_src_set_abs(&alu.src[0]);
3967 break;
3968 default:
3969 break;
3970 }
3971 }
3972 if (i == lasti) {
3973 alu.last = 1;
3974 }
3975 r = r600_bytecode_add_alu(ctx->bc, &alu);
3976 if (r)
3977 return r;
3978 }
3979
3980 if (use_tmp) {
3981 write_mask = inst->Dst[0].Register.WriteMask;
3982
3983 /* move result from temp to dst */
3984 for (i = 0; i <= lasti; i++) {
3985 if (!(write_mask & (1 << i)))
3986 continue;
3987
3988 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3989 alu.op = ALU_OP1_MOV;
3990 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3991 alu.src[0].sel = ctx->temp_reg;
3992 alu.src[0].chan = use_tmp - 1;
3993 alu.last = (i == lasti);
3994
3995 r = r600_bytecode_add_alu(ctx->bc, &alu);
3996 if (r)
3997 return r;
3998 }
3999 }
4000 return 0;
4001 }
4002
4003 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
4004 {
4005 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4006 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4007 	/* 64-bit values occupy channel pairs, so the writemask must cover xy and/or zw in full */
4008 if ((write_mask & 0x3) != 0x3 &&
4009 (write_mask & 0xc) != 0xc) {
4010 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
4011 return -1;
4012 }
4013 return tgsi_op2_64_params(ctx, false, false);
4014 }
4015
4016 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
4017 {
4018 return tgsi_op2_64_params(ctx, true, false);
4019 }
4020
4021 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
4022 {
4023 return tgsi_op2_64_params(ctx, true, true);
4024 }
4025
4026 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
4027 {
4028 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4029 struct r600_bytecode_alu alu;
4030 int i, j, r;
4031 int lasti = 3;
4032 int tmp = r600_get_temp(ctx);
4033
4034 for (i = 0; i < lasti + 1; i++) {
4035
4036 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4037 alu.op = ctx->inst_info->op;
4038 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4039 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
4040 }
4041
4042 if (inst->Dst[0].Register.WriteMask & (1 << i))
4043 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4044 else
4045 alu.dst.sel = tmp;
4046
4047 alu.dst.chan = i;
4048 alu.is_op3 = 1;
4049 if (i == lasti) {
4050 alu.last = 1;
4051 }
4052 r = r600_bytecode_add_alu(ctx->bc, &alu);
4053 if (r)
4054 return r;
4055 }
4056 return 0;
4057 }
4058
4059 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
4060 {
4061 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4062 struct r600_bytecode_alu alu;
4063 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4064 int i, j, r, lasti = tgsi_last_instruction(write_mask);
4065 /* use temp register if trans_only and more than one dst component */
4066 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
4067 unsigned op = ctx->inst_info->op;
4068
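	/* MUL_ZERO_WINS asks for legacy multiply semantics where 0 * anything
	 * (including Inf/NaN) is 0; the non-IEEE MUL op provides that */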
4069 if (op == ALU_OP2_MUL_IEEE &&
4070 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
4071 op = ALU_OP2_MUL;
4072
4073 for (i = 0; i <= lasti; i++) {
4074 if (!(write_mask & (1 << i)))
4075 continue;
4076
4077 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4078 if (use_tmp) {
4079 alu.dst.sel = ctx->temp_reg;
4080 alu.dst.chan = i;
4081 alu.dst.write = 1;
4082 } else
4083 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4084
4085 alu.op = op;
4086 if (!swap) {
4087 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4088 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
4089 }
4090 } else {
4091 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4092 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4093 }
4094 if (i == lasti || trans_only) {
4095 alu.last = 1;
4096 }
4097 r = r600_bytecode_add_alu(ctx->bc, &alu);
4098 if (r)
4099 return r;
4100 }
4101
4102 if (use_tmp) {
4103 /* move result from temp to dst */
4104 for (i = 0; i <= lasti; i++) {
4105 if (!(write_mask & (1 << i)))
4106 continue;
4107
4108 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4109 alu.op = ALU_OP1_MOV;
4110 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4111 alu.src[0].sel = ctx->temp_reg;
4112 alu.src[0].chan = i;
4113 alu.last = (i == lasti);
4114
4115 r = r600_bytecode_add_alu(ctx->bc, &alu);
4116 if (r)
4117 return r;
4118 }
4119 }
4120 return 0;
4121 }
4122
4123 static int tgsi_op2(struct r600_shader_ctx *ctx)
4124 {
4125 return tgsi_op2_s(ctx, 0, 0);
4126 }
4127
4128 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
4129 {
4130 return tgsi_op2_s(ctx, 1, 0);
4131 }
4132
4133 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
4134 {
4135 return tgsi_op2_s(ctx, 0, 1);
4136 }
4137
4138 static int tgsi_ineg(struct r600_shader_ctx *ctx)
4139 {
4140 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4141 struct r600_bytecode_alu alu;
4142 int i, r;
4143 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4144
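	/* there is no dedicated negate op; compute dst = 0 - src with the
	 * subtract op supplied by inst_info */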
4145 for (i = 0; i < lasti + 1; i++) {
4146
4147 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4148 continue;
4149 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4150 alu.op = ctx->inst_info->op;
4151
4152 alu.src[0].sel = V_SQ_ALU_SRC_0;
4153
4154 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4155
4156 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4157
4158 if (i == lasti) {
4159 alu.last = 1;
4160 }
4161 r = r600_bytecode_add_alu(ctx->bc, &alu);
4162 if (r)
4163 return r;
4164 }
4165 return 0;
4166
4167 }
4168
4169 static int tgsi_dneg(struct r600_shader_ctx *ctx)
4170 {
4171 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4172 struct r600_bytecode_alu alu;
4173 int i, r;
4174 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4175
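	/* negate a double by toggling neg only on the high dword of each
	 * channel pair (y/w), where the fp64 sign bit lives; the low dword
	 * is copied unchanged */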
4176 for (i = 0; i < lasti + 1; i++) {
4177
4178 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4179 continue;
4180 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4181 alu.op = ALU_OP1_MOV;
4182
4183 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4184
4185 if (i == 1 || i == 3)
4186 r600_bytecode_src_toggle_neg(&alu.src[0]);
4187 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4188
4189 if (i == lasti) {
4190 alu.last = 1;
4191 }
4192 r = r600_bytecode_add_alu(ctx->bc, &alu);
4193 if (r)
4194 return r;
4195 }
4196 return 0;
4197
4198 }
4199
4200 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
4201 {
4202 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4203 struct r600_bytecode_alu alu;
4204 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4205 int i, j, r;
4206
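	/* DFRACEXP is frexp(): the hw op leaves the significand pair in
	 * temp.zw and the integer exponent in temp.y; the moves below copy
	 * them to Dst[0] and Dst[1] respectively */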
4207 for (i = 0; i <= 3; i++) {
4208 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4209 alu.op = ctx->inst_info->op;
4210
4211 alu.dst.sel = ctx->temp_reg;
4212 alu.dst.chan = i;
4213 alu.dst.write = 1;
4214 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4215 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
4216 }
4217
4218 if (i == 3)
4219 alu.last = 1;
4220
4221 r = r600_bytecode_add_alu(ctx->bc, &alu);
4222 if (r)
4223 return r;
4224 }
4225
4226 /* Replicate significand result across channels. */
4227 for (i = 0; i <= 3; i++) {
4228 if (!(write_mask & (1 << i)))
4229 continue;
4230
4231 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4232 alu.op = ALU_OP1_MOV;
4233 alu.src[0].chan = (i & 1) + 2;
4234 alu.src[0].sel = ctx->temp_reg;
4235
4236 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4237 alu.dst.write = 1;
4238 alu.last = 1;
4239 r = r600_bytecode_add_alu(ctx->bc, &alu);
4240 if (r)
4241 return r;
4242 }
4243
4244 for (i = 0; i <= 3; i++) {
4245 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
4246 			/* move the exponent (temp.y) to the first channel enabled in dst1's writemask */
4247 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4248 alu.op = ALU_OP1_MOV;
4249 alu.src[0].chan = 1;
4250 alu.src[0].sel = ctx->temp_reg;
4251
4252 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
4253 alu.last = 1;
4254 r = r600_bytecode_add_alu(ctx->bc, &alu);
4255 if (r)
4256 return r;
4257 break;
4258 }
4259 }
4260 return 0;
4261 }
4262
4263
4264 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
4265 {
4266 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4267 struct r600_bytecode_alu alu;
4268 int i, r;
4269 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4270
4271 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
4272 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
4273
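	/* two-step conversion: INT_TO_FLT/UINT_TO_FLT into temp, then
	 * FLT32_TO_FLT64 expands each float across a channel pair (odd
	 * channels convert a literal 0). Note the float32 intermediate
	 * rounds integers whose magnitude exceeds 2^24. */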
4274 for (i = 0; i <= (lasti+1)/2; i++) {
4275 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4276 alu.op = ctx->inst_info->op;
4277
4278 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4279 alu.dst.sel = ctx->temp_reg;
4280 alu.dst.chan = i;
4281 alu.dst.write = 1;
4282 alu.last = 1;
4283
4284 r = r600_bytecode_add_alu(ctx->bc, &alu);
4285 if (r)
4286 return r;
4287 }
4288
4289 for (i = 0; i <= lasti; i++) {
4290 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4291 alu.op = ALU_OP1_FLT32_TO_FLT64;
4292
4293 alu.src[0].chan = i/2;
4294 if (i%2 == 0)
4295 alu.src[0].sel = ctx->temp_reg;
4296 else {
4297 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
4298 alu.src[0].value = 0x0;
4299 }
4300 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4301 alu.last = i == lasti;
4302
4303 r = r600_bytecode_add_alu(ctx->bc, &alu);
4304 if (r)
4305 return r;
4306 }
4307
4308 return 0;
4309 }
4310
4311 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
4312 {
4313 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4314 struct r600_bytecode_alu alu;
4315 int i, r;
4316 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4317
4318 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
4319 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
4320
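	/* reverse of the int/double path: FLT64_TO_FLT32 narrows each
	 * channel pair to a float (kept in the even channel), then the
	 * FLT_TO_INT/FLT_TO_UINT op from inst_info converts that float */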
4321 for (i = 0; i <= lasti; i++) {
4322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4323 alu.op = ALU_OP1_FLT64_TO_FLT32;
4324
4325 r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i));
4326 alu.dst.chan = i;
4327 alu.dst.sel = ctx->temp_reg;
4328 alu.dst.write = i%2 == 0;
4329 alu.last = i == lasti;
4330
4331 r = r600_bytecode_add_alu(ctx->bc, &alu);
4332 if (r)
4333 return r;
4334 }
4335
4336 for (i = 0; i <= (lasti+1)/2; i++) {
4337 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4338 alu.op = ctx->inst_info->op;
4339
4340 alu.src[0].chan = i*2;
4341 alu.src[0].sel = ctx->temp_reg;
4342 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
4343 alu.last = 1;
4344
4345 r = r600_bytecode_add_alu(ctx->bc, &alu);
4346 if (r)
4347 return r;
4348 }
4349
4350 return 0;
4351 }
4352
4353 static int cayman_emit_unary_double_raw(struct r600_bytecode *bc,
4354 unsigned op,
4355 int dst_reg,
4356 struct r600_shader_src *src,
4357 bool abs)
4358 {
4359 struct r600_bytecode_alu alu;
4360 const int last_slot = 3;
4361 int r;
4362
4363 	/* these ops appear to require the result to be written to the X/Y channel pair */
4364 for (int i = 0 ; i < last_slot; i++) {
4365 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4366 alu.op = op;
4367
4368 r600_bytecode_src(&alu.src[0], src, 1);
4369 r600_bytecode_src(&alu.src[1], src, 0);
4370
4371 if (abs)
4372 r600_bytecode_src_set_abs(&alu.src[1]);
4373
4374 alu.dst.sel = dst_reg;
4375 alu.dst.chan = i;
4376 alu.dst.write = (i == 0 || i == 1);
4377
4378 if (bc->chip_class != CAYMAN || i == last_slot - 1)
4379 alu.last = 1;
4380 r = r600_bytecode_add_alu(bc, &alu);
4381 if (r)
4382 return r;
4383 }
4384
4385 return 0;
4386 }
4387
4388 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
4389 {
4390 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4391 int i, r;
4392 struct r600_bytecode_alu alu;
4393 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4394 int t1 = ctx->temp_reg;
4395
4396 	/* should only be one src reg */
4397 assert(inst->Instruction.NumSrcRegs == 1);
4398
4399 /* only support one double at a time */
4400 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4401 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4402
4403 r = cayman_emit_unary_double_raw(
4404 ctx->bc, ctx->inst_info->op, t1,
4405 &ctx->src[0],
4406 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
4407 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT);
4408 if (r)
4409 return r;
4410
4411 for (i = 0 ; i <= lasti; i++) {
4412 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4413 continue;
4414 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4415 alu.op = ALU_OP1_MOV;
4416 alu.src[0].sel = t1;
4417 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
4418 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4419 alu.dst.write = 1;
4420 if (i == lasti)
4421 alu.last = 1;
4422 r = r600_bytecode_add_alu(ctx->bc, &alu);
4423 if (r)
4424 return r;
4425 }
4426 return 0;
4427 }
4428
4429 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
4430 {
4431 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4432 int i, j, r;
4433 struct r600_bytecode_alu alu;
4434 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
4435
4436 for (i = 0 ; i < last_slot; i++) {
4437 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4438 alu.op = ctx->inst_info->op;
4439 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4440 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
4441
4442 /* RSQ should take the absolute value of src */
4443 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
4444 r600_bytecode_src_set_abs(&alu.src[j]);
4445 }
4446 }
4447 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4448 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
4449
4450 if (i == last_slot - 1)
4451 alu.last = 1;
4452 r = r600_bytecode_add_alu(ctx->bc, &alu);
4453 if (r)
4454 return r;
4455 }
4456 return 0;
4457 }
4458
4459 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
4460 {
4461 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4462 int i, j, k, r;
4463 struct r600_bytecode_alu alu;
4464 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4465 int t1 = ctx->temp_reg;
4466
4467 for (k = 0; k <= lasti; k++) {
4468 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
4469 continue;
4470
4471 for (i = 0 ; i < 4; i++) {
4472 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4473 alu.op = ctx->inst_info->op;
4474 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4475 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
4476 }
4477 alu.dst.sel = t1;
4478 alu.dst.chan = i;
4479 alu.dst.write = (i == k);
4480 if (i == 3)
4481 alu.last = 1;
4482 r = r600_bytecode_add_alu(ctx->bc, &alu);
4483 if (r)
4484 return r;
4485 }
4486 }
4487
4488 for (i = 0 ; i <= lasti; i++) {
4489 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4490 continue;
4491 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4492 alu.op = ALU_OP1_MOV;
4493 alu.src[0].sel = t1;
4494 alu.src[0].chan = i;
4495 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4496 alu.dst.write = 1;
4497 if (i == lasti)
4498 alu.last = 1;
4499 r = r600_bytecode_add_alu(ctx->bc, &alu);
4500 if (r)
4501 return r;
4502 }
4503
4504 return 0;
4505 }
4506
4507
4508 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
4509 {
4510 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4511 int i, j, k, r;
4512 struct r600_bytecode_alu alu;
4513 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4514 int t1 = ctx->temp_reg;
4515
4516 /* t1 would get overwritten below if we actually tried to
4517 * multiply two pairs of doubles at a time. */
4518 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4519 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4520
4521 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
4522
4523 for (i = 0; i < 4; i++) {
4524 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4525 alu.op = ctx->inst_info->op;
4526 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
4527 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
4528 }
4529 alu.dst.sel = t1;
4530 alu.dst.chan = i;
4531 alu.dst.write = 1;
4532 if (i == 3)
4533 alu.last = 1;
4534 r = r600_bytecode_add_alu(ctx->bc, &alu);
4535 if (r)
4536 return r;
4537 }
4538
4539 for (i = 0; i <= lasti; i++) {
4540 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4541 continue;
4542 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4543 alu.op = ALU_OP1_MOV;
4544 alu.src[0].sel = t1;
4545 alu.src[0].chan = i;
4546 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4547 alu.dst.write = 1;
4548 if (i == lasti)
4549 alu.last = 1;
4550 r = r600_bytecode_add_alu(ctx->bc, &alu);
4551 if (r)
4552 return r;
4553 }
4554
4555 return 0;
4556 }
4557
4558 /*
4559 * Emit RECIP_64 + MUL_64 to implement division.
4560 */
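/* That is, dst = src0 * (1 / src1). RECIP_64 only approximates the
 * reciprocal, so this is not a correctly rounded IEEE division. */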
4561 static int cayman_ddiv_instr(struct r600_shader_ctx *ctx)
4562 {
4563 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4564 int r;
4565 struct r600_bytecode_alu alu;
4566 int t1 = ctx->temp_reg;
4567 int k;
4568
4569 /* Only support one double at a time. This is the same constraint as
4570 * in DMUL lowering. */
4571 assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ||
4572 inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW);
4573
4574 k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1;
4575
4576 r = cayman_emit_unary_double_raw(ctx->bc, ALU_OP2_RECIP_64, t1, &ctx->src[1], false);
4577 if (r)
4578 return r;
4579
4580 for (int i = 0; i < 4; i++) {
4581 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4582 alu.op = ALU_OP2_MUL_64;
4583
4584 r600_bytecode_src(&alu.src[0], &ctx->src[0], k * 2 + ((i == 3) ? 0 : 1));
4585
4586 alu.src[1].sel = t1;
4587 alu.src[1].chan = (i == 3) ? 0 : 1;
4588
4589 alu.dst.sel = t1;
4590 alu.dst.chan = i;
4591 alu.dst.write = 1;
4592 if (i == 3)
4593 alu.last = 1;
4594 r = r600_bytecode_add_alu(ctx->bc, &alu);
4595 if (r)
4596 return r;
4597 }
4598
4599 for (int i = 0; i < 2; i++) {
4600 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4601 alu.op = ALU_OP1_MOV;
4602 alu.src[0].sel = t1;
4603 alu.src[0].chan = i;
4604 tgsi_dst(ctx, &inst->Dst[0], k * 2 + i, &alu.dst);
4605 alu.dst.write = 1;
4606 if (i == 1)
4607 alu.last = 1;
4608 r = r600_bytecode_add_alu(ctx->bc, &alu);
4609 if (r)
4610 return r;
4611 }
4612 return 0;
4613 }
4614
4615 /*
4616 * r600 - trunc to -PI..PI range
4617 * r700 - normalize by dividing by 2PI
4618 * see fdo bug 27901
4619 */
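/* Concretely, both paths first fold the angle into one period:
 *   tmp = fract(src * 1/(2*PI) + 0.5)        (tmp in [0, 1))
 * and then rescale it:
 *   r600:  result = tmp * 2*PI - PI          (range -PI..PI)
 *   r700+: result = tmp - 0.5                (range -0.5..0.5; SIN/COS
 *          on these chips take the angle in turns)
 */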
4620 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
4621 {
4622 int r;
4623 struct r600_bytecode_alu alu;
4624
4625 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4626 alu.op = ALU_OP3_MULADD;
4627 alu.is_op3 = 1;
4628
4629 alu.dst.chan = 0;
4630 alu.dst.sel = ctx->temp_reg;
4631 alu.dst.write = 1;
4632
4633 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
4634
4635 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4636 alu.src[1].chan = 0;
4637 alu.src[1].value = u_bitcast_f2u(0.5f * M_1_PI);
4638 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
4639 alu.src[2].chan = 0;
4640 alu.last = 1;
4641 r = r600_bytecode_add_alu(ctx->bc, &alu);
4642 if (r)
4643 return r;
4644
4645 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4646 alu.op = ALU_OP1_FRACT;
4647
4648 alu.dst.chan = 0;
4649 alu.dst.sel = ctx->temp_reg;
4650 alu.dst.write = 1;
4651
4652 alu.src[0].sel = ctx->temp_reg;
4653 alu.src[0].chan = 0;
4654 alu.last = 1;
4655 r = r600_bytecode_add_alu(ctx->bc, &alu);
4656 if (r)
4657 return r;
4658
4659 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4660 alu.op = ALU_OP3_MULADD;
4661 alu.is_op3 = 1;
4662
4663 alu.dst.chan = 0;
4664 alu.dst.sel = ctx->temp_reg;
4665 alu.dst.write = 1;
4666
4667 alu.src[0].sel = ctx->temp_reg;
4668 alu.src[0].chan = 0;
4669
4670 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4671 alu.src[1].chan = 0;
4672 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
4673 alu.src[2].chan = 0;
4674
4675 if (ctx->bc->chip_class == R600) {
4676 alu.src[1].value = u_bitcast_f2u(2.0f * M_PI);
4677 alu.src[2].value = u_bitcast_f2u(-M_PI);
4678 } else {
4679 alu.src[1].sel = V_SQ_ALU_SRC_1;
4680 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
4681 alu.src[2].neg = 1;
4682 }
4683
4684 alu.last = 1;
4685 r = r600_bytecode_add_alu(ctx->bc, &alu);
4686 if (r)
4687 return r;
4688 return 0;
4689 }
4690
4691 static int cayman_trig(struct r600_shader_ctx *ctx)
4692 {
4693 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4694 struct r600_bytecode_alu alu;
4695 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
4696 int i, r;
4697
4698 r = tgsi_setup_trig(ctx);
4699 if (r)
4700 return r;
4701
4702
4703 for (i = 0; i < last_slot; i++) {
4704 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4705 alu.op = ctx->inst_info->op;
4706 alu.dst.chan = i;
4707
4708 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4709 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
4710
4711 alu.src[0].sel = ctx->temp_reg;
4712 alu.src[0].chan = 0;
4713 if (i == last_slot - 1)
4714 alu.last = 1;
4715 r = r600_bytecode_add_alu(ctx->bc, &alu);
4716 if (r)
4717 return r;
4718 }
4719 return 0;
4720 }
4721
4722 static int tgsi_trig(struct r600_shader_ctx *ctx)
4723 {
4724 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4725 struct r600_bytecode_alu alu;
4726 int i, r;
4727 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4728
4729 r = tgsi_setup_trig(ctx);
4730 if (r)
4731 return r;
4732
4733 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4734 alu.op = ctx->inst_info->op;
4735 alu.dst.chan = 0;
4736 alu.dst.sel = ctx->temp_reg;
4737 alu.dst.write = 1;
4738
4739 alu.src[0].sel = ctx->temp_reg;
4740 alu.src[0].chan = 0;
4741 alu.last = 1;
4742 r = r600_bytecode_add_alu(ctx->bc, &alu);
4743 if (r)
4744 return r;
4745
4746 /* replicate result */
4747 for (i = 0; i < lasti + 1; i++) {
4748 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4749 continue;
4750
4751 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4752 alu.op = ALU_OP1_MOV;
4753
4754 alu.src[0].sel = ctx->temp_reg;
4755 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4756 if (i == lasti)
4757 alu.last = 1;
4758 r = r600_bytecode_add_alu(ctx->bc, &alu);
4759 if (r)
4760 return r;
4761 }
4762 return 0;
4763 }
4764
4765 static int tgsi_kill(struct r600_shader_ctx *ctx)
4766 {
4767 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4768 struct r600_bytecode_alu alu;
4769 int i, r;
4770
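	/* the KILL* compare op from inst_info tests 0 against the second
	 * source per channel: plain KILL uses -1.0 there, so the compare
	 * always passes and the pixel is killed unconditionally; KILL_IF
	 * uses the TGSI source, killing when any component is negative */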
4771 for (i = 0; i < 4; i++) {
4772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4773 alu.op = ctx->inst_info->op;
4774
4775 alu.dst.chan = i;
4776
4777 alu.src[0].sel = V_SQ_ALU_SRC_0;
4778
4779 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
4780 alu.src[1].sel = V_SQ_ALU_SRC_1;
4781 alu.src[1].neg = 1;
4782 } else {
4783 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4784 }
4785 if (i == 3) {
4786 alu.last = 1;
4787 }
4788 r = r600_bytecode_add_alu(ctx->bc, &alu);
4789 if (r)
4790 return r;
4791 }
4792
4793 /* kill must be last in ALU */
4794 ctx->bc->force_add_cf = 1;
4795 ctx->shader->uses_kill = TRUE;
4796 return 0;
4797 }
4798
4799 static int tgsi_lit(struct r600_shader_ctx *ctx)
4800 {
4801 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4802 struct r600_bytecode_alu alu;
4803 int r;
4804
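	/* LIT: dst = (1, max(src.x, 0), src.x > 0 ? max(src.y, 0)^src.w : 0, 1).
	 * The power is computed as exp2(src.w * log2(max(src.y, 0))), with
	 * MUL_LIT handling the src.x <= 0 and clamping special cases. */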
4805 /* tmp.x = max(src.y, 0.0) */
4806 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4807 alu.op = ALU_OP2_MAX;
4808 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
4809 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
4810 alu.src[1].chan = 1;
4811
4812 alu.dst.sel = ctx->temp_reg;
4813 alu.dst.chan = 0;
4814 alu.dst.write = 1;
4815
4816 alu.last = 1;
4817 r = r600_bytecode_add_alu(ctx->bc, &alu);
4818 if (r)
4819 return r;
4820
4821 if (inst->Dst[0].Register.WriteMask & (1 << 2))
4822 {
4823 int chan;
4824 int sel;
4825 unsigned i;
4826
4827 if (ctx->bc->chip_class == CAYMAN) {
4828 for (i = 0; i < 3; i++) {
4829 				/* tmp.z = log2(tmp.x) */
4830 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4831 alu.op = ALU_OP1_LOG_CLAMPED;
4832 alu.src[0].sel = ctx->temp_reg;
4833 alu.src[0].chan = 0;
4834 alu.dst.sel = ctx->temp_reg;
4835 alu.dst.chan = i;
4836 if (i == 2) {
4837 alu.dst.write = 1;
4838 alu.last = 1;
4839 } else
4840 alu.dst.write = 0;
4841
4842 r = r600_bytecode_add_alu(ctx->bc, &alu);
4843 if (r)
4844 return r;
4845 }
4846 } else {
4847 			/* tmp.z = log2(tmp.x) */
4848 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4849 alu.op = ALU_OP1_LOG_CLAMPED;
4850 alu.src[0].sel = ctx->temp_reg;
4851 alu.src[0].chan = 0;
4852 alu.dst.sel = ctx->temp_reg;
4853 alu.dst.chan = 2;
4854 alu.dst.write = 1;
4855 alu.last = 1;
4856 r = r600_bytecode_add_alu(ctx->bc, &alu);
4857 if (r)
4858 return r;
4859 }
4860
4861 chan = alu.dst.chan;
4862 sel = alu.dst.sel;
4863
4864 		/* tmp.x = MUL_LIT(tmp.z, src.w, src.x), an AMD-specific op */
4865 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4866 alu.op = ALU_OP3_MUL_LIT;
4867 alu.src[0].sel = sel;
4868 alu.src[0].chan = chan;
4869 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
4870 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
4871 alu.dst.sel = ctx->temp_reg;
4872 alu.dst.chan = 0;
4873 alu.dst.write = 1;
4874 alu.is_op3 = 1;
4875 alu.last = 1;
4876 r = r600_bytecode_add_alu(ctx->bc, &alu);
4877 if (r)
4878 return r;
4879
4880 if (ctx->bc->chip_class == CAYMAN) {
4881 for (i = 0; i < 3; i++) {
4882 				/* dst.z = exp2(tmp.x) */
4883 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4884 alu.op = ALU_OP1_EXP_IEEE;
4885 alu.src[0].sel = ctx->temp_reg;
4886 alu.src[0].chan = 0;
4887 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4888 if (i == 2) {
4889 alu.dst.write = 1;
4890 alu.last = 1;
4891 } else
4892 alu.dst.write = 0;
4893 r = r600_bytecode_add_alu(ctx->bc, &alu);
4894 if (r)
4895 return r;
4896 }
4897 } else {
4898 			/* dst.z = exp2(tmp.x) */
4899 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4900 alu.op = ALU_OP1_EXP_IEEE;
4901 alu.src[0].sel = ctx->temp_reg;
4902 alu.src[0].chan = 0;
4903 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
4904 alu.last = 1;
4905 r = r600_bytecode_add_alu(ctx->bc, &alu);
4906 if (r)
4907 return r;
4908 }
4909 }
4910
4911 	/* dst.x <- 1.0 */
4912 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4913 alu.op = ALU_OP1_MOV;
4914 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
4915 alu.src[0].chan = 0;
4916 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
4917 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
4918 r = r600_bytecode_add_alu(ctx->bc, &alu);
4919 if (r)
4920 return r;
4921
4922 /* dst.y = max(src.x, 0.0) */
4923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4924 alu.op = ALU_OP2_MAX;
4925 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
4926 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
4927 alu.src[1].chan = 0;
4928 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
4929 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
4930 r = r600_bytecode_add_alu(ctx->bc, &alu);
4931 if (r)
4932 return r;
4933
4934 	/* dst.w <- 1.0 */
4935 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4936 alu.op = ALU_OP1_MOV;
4937 alu.src[0].sel = V_SQ_ALU_SRC_1;
4938 alu.src[0].chan = 0;
4939 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
4940 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
4941 alu.last = 1;
4942 r = r600_bytecode_add_alu(ctx->bc, &alu);
4943 if (r)
4944 return r;
4945
4946 return 0;
4947 }
4948
4949 static int tgsi_rsq(struct r600_shader_ctx *ctx)
4950 {
4951 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4952 struct r600_bytecode_alu alu;
4953 int i, r;
4954
4955 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4956
4957 alu.op = ALU_OP1_RECIPSQRT_IEEE;
4958
4959 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
4960 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
4961 r600_bytecode_src_set_abs(&alu.src[i]);
4962 }
4963 alu.dst.sel = ctx->temp_reg;
4964 alu.dst.write = 1;
4965 alu.last = 1;
4966 r = r600_bytecode_add_alu(ctx->bc, &alu);
4967 if (r)
4968 return r;
4969 /* replicate result */
4970 return tgsi_helper_tempx_replicate(ctx);
4971 }
4972
4973 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
4974 {
4975 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4976 struct r600_bytecode_alu alu;
4977 int i, r;
4978
4979 for (i = 0; i < 4; i++) {
4980 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4981 alu.src[0].sel = ctx->temp_reg;
4982 alu.op = ALU_OP1_MOV;
4983 alu.dst.chan = i;
4984 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4985 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
4986 if (i == 3)
4987 alu.last = 1;
4988 r = r600_bytecode_add_alu(ctx->bc, &alu);
4989 if (r)
4990 return r;
4991 }
4992 return 0;
4993 }
4994
4995 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
4996 {
4997 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4998 struct r600_bytecode_alu alu;
4999 int i, r;
5000
5001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5002 alu.op = ctx->inst_info->op;
5003 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
5004 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
5005 }
5006 alu.dst.sel = ctx->temp_reg;
5007 alu.dst.write = 1;
5008 alu.last = 1;
5009 r = r600_bytecode_add_alu(ctx->bc, &alu);
5010 if (r)
5011 return r;
5012 /* replicate result */
5013 return tgsi_helper_tempx_replicate(ctx);
5014 }
5015
5016 static int cayman_pow(struct r600_shader_ctx *ctx)
5017 {
5018 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5019 int i, r;
5020 struct r600_bytecode_alu alu;
5021 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
5022
5023 for (i = 0; i < 3; i++) {
5024 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5025 alu.op = ALU_OP1_LOG_IEEE;
5026 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5027 alu.dst.sel = ctx->temp_reg;
5028 alu.dst.chan = i;
5029 alu.dst.write = 1;
5030 if (i == 2)
5031 alu.last = 1;
5032 r = r600_bytecode_add_alu(ctx->bc, &alu);
5033 if (r)
5034 return r;
5035 }
5036
5037 /* b * LOG2(a) */
5038 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5039 alu.op = ALU_OP2_MUL;
5040 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5041 alu.src[1].sel = ctx->temp_reg;
5042 alu.dst.sel = ctx->temp_reg;
5043 alu.dst.write = 1;
5044 alu.last = 1;
5045 r = r600_bytecode_add_alu(ctx->bc, &alu);
5046 if (r)
5047 return r;
5048
5049 for (i = 0; i < last_slot; i++) {
5050 		/* POW(a,b) = EXP2(b * LOG2(a)) */
5051 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5052 alu.op = ALU_OP1_EXP_IEEE;
5053 alu.src[0].sel = ctx->temp_reg;
5054
5055 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5056 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5057 if (i == last_slot - 1)
5058 alu.last = 1;
5059 r = r600_bytecode_add_alu(ctx->bc, &alu);
5060 if (r)
5061 return r;
5062 }
5063 return 0;
5064 }
5065
5066 static int tgsi_pow(struct r600_shader_ctx *ctx)
5067 {
5068 struct r600_bytecode_alu alu;
5069 int r;
5070
5071 /* LOG2(a) */
5072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5073 alu.op = ALU_OP1_LOG_IEEE;
5074 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5075 alu.dst.sel = ctx->temp_reg;
5076 alu.dst.write = 1;
5077 alu.last = 1;
5078 r = r600_bytecode_add_alu(ctx->bc, &alu);
5079 if (r)
5080 return r;
5081 /* b * LOG2(a) */
5082 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5083 alu.op = ALU_OP2_MUL;
5084 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5085 alu.src[1].sel = ctx->temp_reg;
5086 alu.dst.sel = ctx->temp_reg;
5087 alu.dst.write = 1;
5088 alu.last = 1;
5089 r = r600_bytecode_add_alu(ctx->bc, &alu);
5090 if (r)
5091 return r;
5092 	/* POW(a,b) = EXP2(b * LOG2(a)) */
5093 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5094 alu.op = ALU_OP1_EXP_IEEE;
5095 alu.src[0].sel = ctx->temp_reg;
5096 alu.dst.sel = ctx->temp_reg;
5097 alu.dst.write = 1;
5098 alu.last = 1;
5099 r = r600_bytecode_add_alu(ctx->bc, &alu);
5100 if (r)
5101 return r;
5102 return tgsi_helper_tempx_replicate(ctx);
5103 }
5104
5105 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
5106 {
5107 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5108 struct r600_bytecode_alu alu;
5109 int i, r, j;
5110 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5111 int tmp0 = ctx->temp_reg;
5112 int tmp1 = r600_get_temp(ctx);
5113 int tmp2 = r600_get_temp(ctx);
5114 int tmp3 = r600_get_temp(ctx);
5115 /* Unsigned path:
5116 *
5117 	 * we need to represent src1 as src2*q + r, where q is the quotient and r the remainder
5118 *
5119 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
5120 * 2. tmp0.z = lo (tmp0.x * src2)
5121 * 3. tmp0.w = -tmp0.z
5122 * 4. tmp0.y = hi (tmp0.x * src2)
5123 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
5124 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
5125 * 7. tmp1.x = tmp0.x - tmp0.w
5126 * 8. tmp1.y = tmp0.x + tmp0.w
5127 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
5128 * 10. tmp0.z = hi(tmp0.x * src1) = q
5129 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
5130 *
5131 * 12. tmp0.w = src1 - tmp0.y = r
5132 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
5133 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
5134 *
5135 * if DIV
5136 *
5137 * 15. tmp1.z = tmp0.z + 1 = q + 1
5138 * 16. tmp1.w = tmp0.z - 1 = q - 1
5139 *
5140 * else MOD
5141 *
5142 * 15. tmp1.z = tmp0.w - src2 = r - src2
5143 * 16. tmp1.w = tmp0.w + src2 = r + src2
5144 *
5145 * endif
5146 *
5147 * 17. tmp1.x = tmp1.x & tmp1.y
5148 *
5149 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
5150 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
5151 *
5152 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
5153 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
5154 *
5155 * Signed path:
5156 *
5157 * Same as unsigned, using abs values of the operands,
5158 * and fixing the sign of the result in the end.
5159 */
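	/* The unsigned path above as a plain-C sketch (for reference only;
	 * recip_uint() and mulhi() are hypothetical stand-ins for the
	 * RECIP_UINT / MULHI_UINT hardware ops):
	 *
	 *   uint32_t udiv(uint32_t n, uint32_t d)   // n = src1, d = src2
	 *   {
	 *       uint32_t rcp = recip_uint(d);       // ~2^32/d; steps 1-9 refine it
	 *       uint32_t q = mulhi(rcp, n);         // step 10: q, possibly off by one
	 *       uint32_t r = n - q * d;             // steps 11-12 (wraps if q too big)
	 *       int ge_d = r >= d;                  // step 13
	 *       int ge_0 = n >= q * d;              // step 14: r didn't underflow
	 *       if (!ge_0)
	 *           q -= 1;                         // q was one too large
	 *       else if (ge_d)
	 *           q += 1;                         // q was one too small
	 *       return d ? q : 0xffffffffu;         // step 20
	 *   }
	 *
	 * The MOD variant selects among r - src2 / r / r + src2 the same way. */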
5160
5161 for (i = 0; i < 4; i++) {
5162 if (!(write_mask & (1<<i)))
5163 continue;
5164
5165 if (signed_op) {
5166
5167 /* tmp2.x = -src0 */
5168 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5169 alu.op = ALU_OP2_SUB_INT;
5170
5171 alu.dst.sel = tmp2;
5172 alu.dst.chan = 0;
5173 alu.dst.write = 1;
5174
5175 alu.src[0].sel = V_SQ_ALU_SRC_0;
5176
5177 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5178
5179 alu.last = 1;
5180 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5181 return r;
5182
5183 /* tmp2.y = -src1 */
5184 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5185 alu.op = ALU_OP2_SUB_INT;
5186
5187 alu.dst.sel = tmp2;
5188 alu.dst.chan = 1;
5189 alu.dst.write = 1;
5190
5191 alu.src[0].sel = V_SQ_ALU_SRC_0;
5192
5193 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5194
5195 alu.last = 1;
5196 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5197 return r;
5198
5199 			/* tmp2.z sign bit is set if src0 and src1 signs differ */
5200 			/* it will be the sign of the quotient */
5201 if (!mod) {
5202
5203 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5204 alu.op = ALU_OP2_XOR_INT;
5205
5206 alu.dst.sel = tmp2;
5207 alu.dst.chan = 2;
5208 alu.dst.write = 1;
5209
5210 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5211 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5212
5213 alu.last = 1;
5214 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5215 return r;
5216 }
5217
5218 /* tmp2.x = |src0| */
5219 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5220 alu.op = ALU_OP3_CNDGE_INT;
5221 alu.is_op3 = 1;
5222
5223 alu.dst.sel = tmp2;
5224 alu.dst.chan = 0;
5225 alu.dst.write = 1;
5226
5227 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5228 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5229 alu.src[2].sel = tmp2;
5230 alu.src[2].chan = 0;
5231
5232 alu.last = 1;
5233 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5234 return r;
5235
5236 /* tmp2.y = |src1| */
5237 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5238 alu.op = ALU_OP3_CNDGE_INT;
5239 alu.is_op3 = 1;
5240
5241 alu.dst.sel = tmp2;
5242 alu.dst.chan = 1;
5243 alu.dst.write = 1;
5244
5245 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5246 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5247 alu.src[2].sel = tmp2;
5248 alu.src[2].chan = 1;
5249
5250 alu.last = 1;
5251 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5252 return r;
5253
5254 }
5255
5256 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
5257 if (ctx->bc->chip_class == CAYMAN) {
5258 /* tmp3.x = u2f(src2) */
5259 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5260 alu.op = ALU_OP1_UINT_TO_FLT;
5261
5262 alu.dst.sel = tmp3;
5263 alu.dst.chan = 0;
5264 alu.dst.write = 1;
5265
5266 if (signed_op) {
5267 alu.src[0].sel = tmp2;
5268 alu.src[0].chan = 1;
5269 } else {
5270 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5271 }
5272
5273 alu.last = 1;
5274 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5275 return r;
5276
5277 /* tmp0.x = recip(tmp3.x) */
5278 for (j = 0 ; j < 3; j++) {
5279 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5280 alu.op = ALU_OP1_RECIP_IEEE;
5281
5282 alu.dst.sel = tmp0;
5283 alu.dst.chan = j;
5284 alu.dst.write = (j == 0);
5285
5286 alu.src[0].sel = tmp3;
5287 alu.src[0].chan = 0;
5288
5289 if (j == 2)
5290 alu.last = 1;
5291 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5292 return r;
5293 }
5294
5295 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5296 alu.op = ALU_OP2_MUL;
5297
5298 alu.src[0].sel = tmp0;
5299 alu.src[0].chan = 0;
5300
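			/* scale by 2^32 (0x4f800000 = 4294967296.0f) so the
			 * FLT_TO_UINT below yields an integer approximation
			 * of 2^32/src2 */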
5301 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5302 alu.src[1].value = 0x4f800000;
5303
5304 alu.dst.sel = tmp3;
5305 alu.dst.write = 1;
5306 alu.last = 1;
5307 r = r600_bytecode_add_alu(ctx->bc, &alu);
5308 if (r)
5309 return r;
5310
5311 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5312 alu.op = ALU_OP1_FLT_TO_UINT;
5313
5314 alu.dst.sel = tmp0;
5315 alu.dst.chan = 0;
5316 alu.dst.write = 1;
5317
5318 alu.src[0].sel = tmp3;
5319 alu.src[0].chan = 0;
5320
5321 alu.last = 1;
5322 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5323 return r;
5324
5325 } else {
5326 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5327 alu.op = ALU_OP1_RECIP_UINT;
5328
5329 alu.dst.sel = tmp0;
5330 alu.dst.chan = 0;
5331 alu.dst.write = 1;
5332
5333 if (signed_op) {
5334 alu.src[0].sel = tmp2;
5335 alu.src[0].chan = 1;
5336 } else {
5337 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5338 }
5339
5340 alu.last = 1;
5341 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5342 return r;
5343 }
5344
5345 /* 2. tmp0.z = lo (tmp0.x * src2) */
5346 if (ctx->bc->chip_class == CAYMAN) {
5347 for (j = 0 ; j < 4; j++) {
5348 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5349 alu.op = ALU_OP2_MULLO_UINT;
5350
5351 alu.dst.sel = tmp0;
5352 alu.dst.chan = j;
5353 alu.dst.write = (j == 2);
5354
5355 alu.src[0].sel = tmp0;
5356 alu.src[0].chan = 0;
5357 if (signed_op) {
5358 alu.src[1].sel = tmp2;
5359 alu.src[1].chan = 1;
5360 } else {
5361 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5362 }
5363
5364 alu.last = (j == 3);
5365 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5366 return r;
5367 }
5368 } else {
5369 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5370 alu.op = ALU_OP2_MULLO_UINT;
5371
5372 alu.dst.sel = tmp0;
5373 alu.dst.chan = 2;
5374 alu.dst.write = 1;
5375
5376 alu.src[0].sel = tmp0;
5377 alu.src[0].chan = 0;
5378 if (signed_op) {
5379 alu.src[1].sel = tmp2;
5380 alu.src[1].chan = 1;
5381 } else {
5382 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5383 }
5384
5385 alu.last = 1;
5386 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5387 return r;
5388 }
5389
5390 /* 3. tmp0.w = -tmp0.z */
5391 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5392 alu.op = ALU_OP2_SUB_INT;
5393
5394 alu.dst.sel = tmp0;
5395 alu.dst.chan = 3;
5396 alu.dst.write = 1;
5397
5398 alu.src[0].sel = V_SQ_ALU_SRC_0;
5399 alu.src[1].sel = tmp0;
5400 alu.src[1].chan = 2;
5401
5402 alu.last = 1;
5403 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5404 return r;
5405
5406 /* 4. tmp0.y = hi (tmp0.x * src2) */
5407 if (ctx->bc->chip_class == CAYMAN) {
5408 for (j = 0 ; j < 4; j++) {
5409 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5410 alu.op = ALU_OP2_MULHI_UINT;
5411
5412 alu.dst.sel = tmp0;
5413 alu.dst.chan = j;
5414 alu.dst.write = (j == 1);
5415
5416 alu.src[0].sel = tmp0;
5417 alu.src[0].chan = 0;
5418
5419 if (signed_op) {
5420 alu.src[1].sel = tmp2;
5421 alu.src[1].chan = 1;
5422 } else {
5423 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5424 }
5425 alu.last = (j == 3);
5426 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5427 return r;
5428 }
5429 } else {
5430 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5431 alu.op = ALU_OP2_MULHI_UINT;
5432
5433 alu.dst.sel = tmp0;
5434 alu.dst.chan = 1;
5435 alu.dst.write = 1;
5436
5437 alu.src[0].sel = tmp0;
5438 alu.src[0].chan = 0;
5439
5440 if (signed_op) {
5441 alu.src[1].sel = tmp2;
5442 alu.src[1].chan = 1;
5443 } else {
5444 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5445 }
5446
5447 alu.last = 1;
5448 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5449 return r;
5450 }
5451
5452 		/* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
5453 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5454 alu.op = ALU_OP3_CNDE_INT;
5455 alu.is_op3 = 1;
5456
5457 alu.dst.sel = tmp0;
5458 alu.dst.chan = 2;
5459 alu.dst.write = 1;
5460
5461 alu.src[0].sel = tmp0;
5462 alu.src[0].chan = 1;
5463 alu.src[1].sel = tmp0;
5464 alu.src[1].chan = 3;
5465 alu.src[2].sel = tmp0;
5466 alu.src[2].chan = 2;
5467
5468 alu.last = 1;
5469 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5470 return r;
5471
5472 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
5473 if (ctx->bc->chip_class == CAYMAN) {
5474 for (j = 0 ; j < 4; j++) {
5475 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5476 alu.op = ALU_OP2_MULHI_UINT;
5477
5478 alu.dst.sel = tmp0;
5479 alu.dst.chan = j;
5480 alu.dst.write = (j == 3);
5481
5482 alu.src[0].sel = tmp0;
5483 alu.src[0].chan = 2;
5484
5485 alu.src[1].sel = tmp0;
5486 alu.src[1].chan = 0;
5487
5488 alu.last = (j == 3);
5489 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5490 return r;
5491 }
5492 } else {
5493 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5494 alu.op = ALU_OP2_MULHI_UINT;
5495
5496 alu.dst.sel = tmp0;
5497 alu.dst.chan = 3;
5498 alu.dst.write = 1;
5499
5500 alu.src[0].sel = tmp0;
5501 alu.src[0].chan = 2;
5502
5503 alu.src[1].sel = tmp0;
5504 alu.src[1].chan = 0;
5505
5506 alu.last = 1;
5507 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5508 return r;
5509 }
5510
5511 /* 7. tmp1.x = tmp0.x - tmp0.w */
5512 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5513 alu.op = ALU_OP2_SUB_INT;
5514
5515 alu.dst.sel = tmp1;
5516 alu.dst.chan = 0;
5517 alu.dst.write = 1;
5518
5519 alu.src[0].sel = tmp0;
5520 alu.src[0].chan = 0;
5521 alu.src[1].sel = tmp0;
5522 alu.src[1].chan = 3;
5523
5524 alu.last = 1;
5525 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5526 return r;
5527
5528 /* 8. tmp1.y = tmp0.x + tmp0.w */
5529 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5530 alu.op = ALU_OP2_ADD_INT;
5531
5532 alu.dst.sel = tmp1;
5533 alu.dst.chan = 1;
5534 alu.dst.write = 1;
5535
5536 alu.src[0].sel = tmp0;
5537 alu.src[0].chan = 0;
5538 alu.src[1].sel = tmp0;
5539 alu.src[1].chan = 3;
5540
5541 alu.last = 1;
5542 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5543 return r;
5544
5545 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
5546 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5547 alu.op = ALU_OP3_CNDE_INT;
5548 alu.is_op3 = 1;
5549
5550 alu.dst.sel = tmp0;
5551 alu.dst.chan = 0;
5552 alu.dst.write = 1;
5553
5554 alu.src[0].sel = tmp0;
5555 alu.src[0].chan = 1;
5556 alu.src[1].sel = tmp1;
5557 alu.src[1].chan = 1;
5558 alu.src[2].sel = tmp1;
5559 alu.src[2].chan = 0;
5560
5561 alu.last = 1;
5562 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5563 return r;
5564
5565 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
5566 if (ctx->bc->chip_class == CAYMAN) {
5567 for (j = 0 ; j < 4; j++) {
5568 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5569 alu.op = ALU_OP2_MULHI_UINT;
5570
5571 alu.dst.sel = tmp0;
5572 alu.dst.chan = j;
5573 alu.dst.write = (j == 2);
5574
5575 alu.src[0].sel = tmp0;
5576 alu.src[0].chan = 0;
5577
5578 if (signed_op) {
5579 alu.src[1].sel = tmp2;
5580 alu.src[1].chan = 0;
5581 } else {
5582 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5583 }
5584
5585 alu.last = (j == 3);
5586 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5587 return r;
5588 }
5589 } else {
5590 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5591 alu.op = ALU_OP2_MULHI_UINT;
5592
5593 alu.dst.sel = tmp0;
5594 alu.dst.chan = 2;
5595 alu.dst.write = 1;
5596
5597 alu.src[0].sel = tmp0;
5598 alu.src[0].chan = 0;
5599
5600 if (signed_op) {
5601 alu.src[1].sel = tmp2;
5602 alu.src[1].chan = 0;
5603 } else {
5604 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5605 }
5606
5607 alu.last = 1;
5608 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5609 return r;
5610 }
5611
5612 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
5613 if (ctx->bc->chip_class == CAYMAN) {
5614 for (j = 0 ; j < 4; j++) {
5615 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5616 alu.op = ALU_OP2_MULLO_UINT;
5617
5618 alu.dst.sel = tmp0;
5619 alu.dst.chan = j;
5620 alu.dst.write = (j == 1);
5621
5622 if (signed_op) {
5623 alu.src[0].sel = tmp2;
5624 alu.src[0].chan = 1;
5625 } else {
5626 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5627 }
5628
5629 alu.src[1].sel = tmp0;
5630 alu.src[1].chan = 2;
5631
5632 alu.last = (j == 3);
5633 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5634 return r;
5635 }
5636 } else {
5637 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5638 alu.op = ALU_OP2_MULLO_UINT;
5639
5640 alu.dst.sel = tmp0;
5641 alu.dst.chan = 1;
5642 alu.dst.write = 1;
5643
5644 if (signed_op) {
5645 alu.src[0].sel = tmp2;
5646 alu.src[0].chan = 1;
5647 } else {
5648 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5649 }
5650
5651 alu.src[1].sel = tmp0;
5652 alu.src[1].chan = 2;
5653
5654 alu.last = 1;
5655 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5656 return r;
5657 }
5658
5659 /* 12. tmp0.w = src1 - tmp0.y = r */
5660 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5661 alu.op = ALU_OP2_SUB_INT;
5662
5663 alu.dst.sel = tmp0;
5664 alu.dst.chan = 3;
5665 alu.dst.write = 1;
5666
5667 if (signed_op) {
5668 alu.src[0].sel = tmp2;
5669 alu.src[0].chan = 0;
5670 } else {
5671 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5672 }
5673
5674 alu.src[1].sel = tmp0;
5675 alu.src[1].chan = 1;
5676
5677 alu.last = 1;
5678 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5679 return r;
5680
5681 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
5682 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5683 alu.op = ALU_OP2_SETGE_UINT;
5684
5685 alu.dst.sel = tmp1;
5686 alu.dst.chan = 0;
5687 alu.dst.write = 1;
5688
5689 alu.src[0].sel = tmp0;
5690 alu.src[0].chan = 3;
5691 if (signed_op) {
5692 alu.src[1].sel = tmp2;
5693 alu.src[1].chan = 1;
5694 } else {
5695 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5696 }
5697
5698 alu.last = 1;
5699 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5700 return r;
5701
5702 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
5703 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5704 alu.op = ALU_OP2_SETGE_UINT;
5705
5706 alu.dst.sel = tmp1;
5707 alu.dst.chan = 1;
5708 alu.dst.write = 1;
5709
5710 if (signed_op) {
5711 alu.src[0].sel = tmp2;
5712 alu.src[0].chan = 0;
5713 } else {
5714 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5715 }
5716
5717 alu.src[1].sel = tmp0;
5718 alu.src[1].chan = 1;
5719
5720 alu.last = 1;
5721 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5722 return r;
5723
5724 if (mod) { /* UMOD */
5725
5726 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
5727 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5728 alu.op = ALU_OP2_SUB_INT;
5729
5730 alu.dst.sel = tmp1;
5731 alu.dst.chan = 2;
5732 alu.dst.write = 1;
5733
5734 alu.src[0].sel = tmp0;
5735 alu.src[0].chan = 3;
5736
5737 if (signed_op) {
5738 alu.src[1].sel = tmp2;
5739 alu.src[1].chan = 1;
5740 } else {
5741 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5742 }
5743
5744 alu.last = 1;
5745 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5746 return r;
5747
5748 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
5749 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5750 alu.op = ALU_OP2_ADD_INT;
5751
5752 alu.dst.sel = tmp1;
5753 alu.dst.chan = 3;
5754 alu.dst.write = 1;
5755
5756 alu.src[0].sel = tmp0;
5757 alu.src[0].chan = 3;
5758 if (signed_op) {
5759 alu.src[1].sel = tmp2;
5760 alu.src[1].chan = 1;
5761 } else {
5762 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5763 }
5764
5765 alu.last = 1;
5766 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5767 return r;
5768
5769 } else { /* UDIV */
5770
5771 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
5772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5773 alu.op = ALU_OP2_ADD_INT;
5774
5775 alu.dst.sel = tmp1;
5776 alu.dst.chan = 2;
5777 alu.dst.write = 1;
5778
5779 alu.src[0].sel = tmp0;
5780 alu.src[0].chan = 2;
5781 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
5782
5783 alu.last = 1;
5784 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5785 return r;
5786
5787 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
5788 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5789 alu.op = ALU_OP2_ADD_INT;
5790
5791 alu.dst.sel = tmp1;
5792 alu.dst.chan = 3;
5793 alu.dst.write = 1;
5794
5795 alu.src[0].sel = tmp0;
5796 alu.src[0].chan = 2;
5797 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
5798
5799 alu.last = 1;
5800 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5801 return r;
5802
5803 }
5804
5805 /* 17. tmp1.x = tmp1.x & tmp1.y */
5806 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5807 alu.op = ALU_OP2_AND_INT;
5808
5809 alu.dst.sel = tmp1;
5810 alu.dst.chan = 0;
5811 alu.dst.write = 1;
5812
5813 alu.src[0].sel = tmp1;
5814 alu.src[0].chan = 0;
5815 alu.src[1].sel = tmp1;
5816 alu.src[1].chan = 1;
5817
5818 alu.last = 1;
5819 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5820 return r;
5821
5822 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
5823 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
5824 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5825 alu.op = ALU_OP3_CNDE_INT;
5826 alu.is_op3 = 1;
5827
5828 alu.dst.sel = tmp0;
5829 alu.dst.chan = 2;
5830 alu.dst.write = 1;
5831
5832 alu.src[0].sel = tmp1;
5833 alu.src[0].chan = 0;
5834 alu.src[1].sel = tmp0;
5835 alu.src[1].chan = mod ? 3 : 2;
5836 alu.src[2].sel = tmp1;
5837 alu.src[2].chan = 2;
5838
5839 alu.last = 1;
5840 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5841 return r;
5842
5843 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
5844 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5845 alu.op = ALU_OP3_CNDE_INT;
5846 alu.is_op3 = 1;
5847
5848 if (signed_op) {
5849 alu.dst.sel = tmp0;
5850 alu.dst.chan = 2;
5851 alu.dst.write = 1;
5852 } else {
5853 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5854 }
5855
5856 alu.src[0].sel = tmp1;
5857 alu.src[0].chan = 1;
5858 alu.src[1].sel = tmp1;
5859 alu.src[1].chan = 3;
5860 alu.src[2].sel = tmp0;
5861 alu.src[2].chan = 2;
5862
5863 alu.last = 1;
5864 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5865 return r;
5866
5867 if (signed_op) {
5868
5869 /* fix the sign of the result */
5870
5871 if (mod) {
5872
5873 /* tmp0.x = -tmp0.z */
5874 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5875 alu.op = ALU_OP2_SUB_INT;
5876
5877 alu.dst.sel = tmp0;
5878 alu.dst.chan = 0;
5879 alu.dst.write = 1;
5880
5881 alu.src[0].sel = V_SQ_ALU_SRC_0;
5882 alu.src[1].sel = tmp0;
5883 alu.src[1].chan = 2;
5884
5885 alu.last = 1;
5886 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5887 return r;
5888
5889 /* sign of the remainder is the same as the sign of src0 */
5890 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
5891 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5892 alu.op = ALU_OP3_CNDGE_INT;
5893 alu.is_op3 = 1;
5894
5895 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5896
5897 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5898 alu.src[1].sel = tmp0;
5899 alu.src[1].chan = 2;
5900 alu.src[2].sel = tmp0;
5901 alu.src[2].chan = 0;
5902
5903 alu.last = 1;
5904 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5905 return r;
5906
5907 } else {
5908
5909 /* tmp0.x = -tmp0.z */
5910 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5911 alu.op = ALU_OP2_SUB_INT;
5912
5913 alu.dst.sel = tmp0;
5914 alu.dst.chan = 0;
5915 alu.dst.write = 1;
5916
5917 alu.src[0].sel = V_SQ_ALU_SRC_0;
5918 alu.src[1].sel = tmp0;
5919 alu.src[1].chan = 2;
5920
5921 alu.last = 1;
5922 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5923 return r;
5924
5925 /* fix the quotient sign (same as the sign of src0*src1) */
5926 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
5927 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5928 alu.op = ALU_OP3_CNDGE_INT;
5929 alu.is_op3 = 1;
5930
5931 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5932
5933 alu.src[0].sel = tmp2;
5934 alu.src[0].chan = 2;
5935 alu.src[1].sel = tmp0;
5936 alu.src[1].chan = 2;
5937 alu.src[2].sel = tmp0;
5938 alu.src[2].chan = 0;
5939
5940 alu.last = 1;
5941 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5942 return r;
5943 }
5944 }
5945 }
5946 return 0;
5947 }
5948
5949 static int tgsi_udiv(struct r600_shader_ctx *ctx)
5950 {
5951 return tgsi_divmod(ctx, 0, 0);
5952 }
5953
5954 static int tgsi_umod(struct r600_shader_ctx *ctx)
5955 {
5956 return tgsi_divmod(ctx, 1, 0);
5957 }
5958
5959 static int tgsi_idiv(struct r600_shader_ctx *ctx)
5960 {
5961 return tgsi_divmod(ctx, 0, 1);
5962 }
5963
5964 static int tgsi_imod(struct r600_shader_ctx *ctx)
5965 {
5966 return tgsi_divmod(ctx, 1, 1);
5967 }
5968
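/* For reference, a scalar model of the quotient fix-up that the
 * tgsi_divmod() ALU sequence above performs in steps 10-19 (an
 * illustrative sketch only, not driver code; the helper name is made
 * up). It assumes q = mulhi(corrected_rcp, num) is off by at most one
 * in either direction; the MOD path applies the matching
 * r - den / r + den corrections instead.
 */
#if 0
static unsigned udiv_fixup(unsigned num, unsigned den, unsigned q)
{
	unsigned r = num - q * den;	/* candidate remainder (may wrap) */

	if (num < q * den)		/* q one too large: r wrapped */
		return q - 1;
	if (r >= den)			/* q one too small */
		return q + 1;
	return q;			/* q already exact */
}
#endif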
5969
5970 static int tgsi_f2i(struct r600_shader_ctx *ctx)
5971 {
5972 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5973 struct r600_bytecode_alu alu;
5974 int i, r;
5975 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5976 int last_inst = tgsi_last_instruction(write_mask);
5977
5978 for (i = 0; i < 4; i++) {
5979 if (!(write_mask & (1<<i)))
5980 continue;
5981
5982 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5983 alu.op = ALU_OP1_TRUNC;
5984
5985 alu.dst.sel = ctx->temp_reg;
5986 alu.dst.chan = i;
5987 alu.dst.write = 1;
5988
5989 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5990 if (i == last_inst)
5991 alu.last = 1;
5992 r = r600_bytecode_add_alu(ctx->bc, &alu);
5993 if (r)
5994 return r;
5995 }
5996
5997 for (i = 0; i < 4; i++) {
5998 if (!(write_mask & (1<<i)))
5999 continue;
6000
6001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6002 alu.op = ctx->inst_info->op;
6003
6004 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6005
6006 alu.src[0].sel = ctx->temp_reg;
6007 alu.src[0].chan = i;
6008
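/* FLT_TO_UINT runs only in the trans slot on r600/r700 class chips,
 * so each conversion has to close its own ALU group. */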
6009 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
6010 alu.last = 1;
6011 r = r600_bytecode_add_alu(ctx->bc, &alu);
6012 if (r)
6013 return r;
6014 }
6015
6016 return 0;
6017 }
6018
6019 static int tgsi_iabs(struct r600_shader_ctx *ctx)
6020 {
6021 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6022 struct r600_bytecode_alu alu;
6023 int i, r;
6024 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6025 int last_inst = tgsi_last_instruction(write_mask);
6026
6027 /* tmp = -src */
6028 for (i = 0; i < 4; i++) {
6029 if (!(write_mask & (1<<i)))
6030 continue;
6031
6032 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6033 alu.op = ALU_OP2_SUB_INT;
6034
6035 alu.dst.sel = ctx->temp_reg;
6036 alu.dst.chan = i;
6037 alu.dst.write = 1;
6038
6039 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6040 alu.src[0].sel = V_SQ_ALU_SRC_0;
6041
6042 if (i == last_inst)
6043 alu.last = 1;
6044 r = r600_bytecode_add_alu(ctx->bc, &alu);
6045 if (r)
6046 return r;
6047 }
6048
6049 /* dst = (src >= 0 ? src : tmp) */
6050 for (i = 0; i < 4; i++) {
6051 if (!(write_mask & (1<<i)))
6052 continue;
6053
6054 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6055 alu.op = ALU_OP3_CNDGE_INT;
6056 alu.is_op3 = 1;
6057 alu.dst.write = 1;
6058
6059 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6060
6061 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6062 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6063 alu.src[2].sel = ctx->temp_reg;
6064 alu.src[2].chan = i;
6065
6066 if (i == last_inst)
6067 alu.last = 1;
6068 r = r600_bytecode_add_alu(ctx->bc, &alu);
6069 if (r)
6070 return r;
6071 }
6072 return 0;
6073 }
6074
6075 static int tgsi_issg(struct r600_shader_ctx *ctx)
6076 {
6077 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6078 struct r600_bytecode_alu alu;
6079 int i, r;
6080 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6081 int last_inst = tgsi_last_instruction(write_mask);
6082
6083 /* tmp = (src >= 0 ? src : -1) */
6084 for (i = 0; i < 4; i++) {
6085 if (!(write_mask & (1<<i)))
6086 continue;
6087
6088 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6089 alu.op = ALU_OP3_CNDGE_INT;
6090 alu.is_op3 = 1;
6091
6092 alu.dst.sel = ctx->temp_reg;
6093 alu.dst.chan = i;
6094 alu.dst.write = 1;
6095
6096 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6097 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6098 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
6099
6100 if (i == last_inst)
6101 alu.last = 1;
6102 r = r600_bytecode_add_alu(ctx->bc, &alu);
6103 if (r)
6104 return r;
6105 }
6106
6107 /* dst = (tmp > 0 ? 1 : tmp) */
6108 for (i = 0; i < 4; i++) {
6109 if (!(write_mask & (1<<i)))
6110 continue;
6111
6112 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6113 alu.op = ALU_OP3_CNDGT_INT;
6114 alu.is_op3 = 1;
6115 alu.dst.write = 1;
6116
6117 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6118
6119 alu.src[0].sel = ctx->temp_reg;
6120 alu.src[0].chan = i;
6121
6122 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
6123
6124 alu.src[2].sel = ctx->temp_reg;
6125 alu.src[2].chan = i;
6126
6127 if (i == last_inst)
6128 alu.last = 1;
6129 r = r600_bytecode_add_alu(ctx->bc, &alu);
6130 if (r)
6131 return r;
6132 }
6133 return 0;
6134 }
6135
6136
6137
6138 static int tgsi_ssg(struct r600_shader_ctx *ctx)
6139 {
6140 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6141 struct r600_bytecode_alu alu;
6142 int i, r;
6143
6144 /* tmp = (src > 0 ? 1 : src) */
6145 for (i = 0; i < 4; i++) {
6146 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6147 alu.op = ALU_OP3_CNDGT;
6148 alu.is_op3 = 1;
6149
6150 alu.dst.sel = ctx->temp_reg;
6151 alu.dst.chan = i;
6152
6153 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6154 alu.src[1].sel = V_SQ_ALU_SRC_1;
6155 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6156
6157 if (i == 3)
6158 alu.last = 1;
6159 r = r600_bytecode_add_alu(ctx->bc, &alu);
6160 if (r)
6161 return r;
6162 }
6163
6164 /* dst = (-tmp > 0 ? -1 : tmp) */
6165 for (i = 0; i < 4; i++) {
6166 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6167 alu.op = ALU_OP3_CNDGT;
6168 alu.is_op3 = 1;
6169 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6170
6171 alu.src[0].sel = ctx->temp_reg;
6172 alu.src[0].chan = i;
6173 alu.src[0].neg = 1;
6174
6175 alu.src[1].sel = V_SQ_ALU_SRC_1;
6176 alu.src[1].neg = 1;
6177
6178 alu.src[2].sel = ctx->temp_reg;
6179 alu.src[2].chan = i;
6180
6181 if (i == 3)
6182 alu.last = 1;
6183 r = r600_bytecode_add_alu(ctx->bc, &alu);
6184 if (r)
6185 return r;
6186 }
6187 return 0;
6188 }
6189
6190 static int tgsi_bfi(struct r600_shader_ctx *ctx)
6191 {
6192 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6193 struct r600_bytecode_alu alu;
6194 int i, r, t1, t2;
6195
6196 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6197 int last_inst = tgsi_last_instruction(write_mask);
6198
6199 t1 = r600_get_temp(ctx);
6200
6201 for (i = 0; i < 4; i++) {
6202 if (!(write_mask & (1<<i)))
6203 continue;
6204
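/* tmp = (bits >= 32): BFM/BFI cannot express a full 32-bit insert,
 * so the final CNDE_INT below falls back to the raw insert value */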
6205 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6206 alu.op = ALU_OP2_SETGE_INT;
6207 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6208 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6209 alu.src[1].value = 32;
6210 alu.dst.sel = ctx->temp_reg;
6211 alu.dst.chan = i;
6212 alu.dst.write = 1;
6213 alu.last = i == last_inst;
6214 r = r600_bytecode_add_alu(ctx->bc, &alu);
6215 if (r)
6216 return r;
6217 }
6218
6219 for (i = 0; i < 4; i++) {
6220 if (!(write_mask & (1<<i)))
6221 continue;
6222
6223 /* create mask tmp */
6224 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6225 alu.op = ALU_OP2_BFM_INT;
6226 alu.dst.sel = t1;
6227 alu.dst.chan = i;
6228 alu.dst.write = 1;
6229 alu.last = i == last_inst;
6230
6231 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
6232 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6233
6234 r = r600_bytecode_add_alu(ctx->bc, &alu);
6235 if (r)
6236 return r;
6237 }
6238
6239 t2 = r600_get_temp(ctx);
6240
6241 for (i = 0; i < 4; i++) {
6242 if (!(write_mask & (1<<i)))
6243 continue;
6244
6245 /* shift insert left */
6246 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6247 alu.op = ALU_OP2_LSHL_INT;
6248 alu.dst.sel = t2;
6249 alu.dst.chan = i;
6250 alu.dst.write = 1;
6251 alu.last = i == last_inst;
6252
6253 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6254 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6255
6256 r = r600_bytecode_add_alu(ctx->bc, &alu);
6257 if (r)
6258 return r;
6259 }
6260
6261 for (i = 0; i < 4; i++) {
6262 if (!(write_mask & (1<<i)))
6263 continue;
6264
6265 /* actual bitfield insert */
6266 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6267 alu.op = ALU_OP3_BFI_INT;
6268 alu.is_op3 = 1;
6269 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6270 alu.dst.chan = i;
6271 alu.dst.write = 1;
6272 alu.last = i == last_inst;
6273
6274 alu.src[0].sel = t1;
6275 alu.src[0].chan = i;
6276 alu.src[1].sel = t2;
6277 alu.src[1].chan = i;
6278 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
6279
6280 r = r600_bytecode_add_alu(ctx->bc, &alu);
6281 if (r)
6282 return r;
6283 }
6284
6285 for (i = 0; i < 4; i++) {
6286 if (!(write_mask & (1<<i)))
6287 continue;
6288 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6289 alu.op = ALU_OP3_CNDE_INT;
6290 alu.is_op3 = 1;
6291 alu.src[0].sel = ctx->temp_reg;
6292 alu.src[0].chan = i;
6293 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6294
6295 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6296
6297 alu.src[1].sel = alu.dst.sel;
6298 alu.src[1].chan = i;
6299
6300 alu.last = i == last_inst;
6301 r = r600_bytecode_add_alu(ctx->bc, &alu);
6302 if (r)
6303 return r;
6304 }
6305 return 0;
6306 }
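/* For reference, a scalar model of the BFI expansion above (an
 * illustrative sketch only, not driver code; the helper name is made
 * up, and offset is assumed to be < 32 as in TGSI BFI semantics):
 */
#if 0
static unsigned bfi_model(unsigned base, unsigned insert,
			  unsigned offset, unsigned bits)
{
	unsigned mask, shifted;

	if (bits >= 32)				/* the CNDE_INT path */
		return insert;
	mask = ((1u << bits) - 1) << offset;	/* BFM_INT */
	shifted = insert << offset;		/* LSHL_INT */
	return (mask & shifted) | (~mask & base);	/* BFI_INT */
}
#endif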
6307
6308 static int tgsi_msb(struct r600_shader_ctx *ctx)
6309 {
6310 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6311 struct r600_bytecode_alu alu;
6312 int i, r, t1, t2;
6313
6314 unsigned write_mask = inst->Dst[0].Register.WriteMask;
6315 int last_inst = tgsi_last_instruction(write_mask);
6316
6317 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
6318 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
6319
6320 t1 = ctx->temp_reg;
6321
6322 /* the bit position is indexed from the LSB by TGSI, and from the MSB by the hardware */
6323 for (i = 0; i < 4; i++) {
6324 if (!(write_mask & (1<<i)))
6325 continue;
6326
6327 /* t1 = FFBH_INT / FFBH_UINT */
6328 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6329 alu.op = ctx->inst_info->op;
6330 alu.dst.sel = t1;
6331 alu.dst.chan = i;
6332 alu.dst.write = 1;
6333 alu.last = i == last_inst;
6334
6335 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6336
6337 r = r600_bytecode_add_alu(ctx->bc, &alu);
6338 if (r)
6339 return r;
6340 }
6341
6342 t2 = r600_get_temp(ctx);
6343
6344 for (i = 0; i < 4; i++) {
6345 if (!(write_mask & (1<<i)))
6346 continue;
6347
6348 /* t2 = 31 - t1 */
6349 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6350 alu.op = ALU_OP2_SUB_INT;
6351 alu.dst.sel = t2;
6352 alu.dst.chan = i;
6353 alu.dst.write = 1;
6354 alu.last = i == last_inst;
6355
6356 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
6357 alu.src[0].value = 31;
6358 alu.src[1].sel = t1;
6359 alu.src[1].chan = i;
6360
6361 r = r600_bytecode_add_alu(ctx->bc, &alu);
6362 if (r)
6363 return r;
6364 }
6365
6366 for (i = 0; i < 4; i++) {
6367 if (!(write_mask & (1<<i)))
6368 continue;
6369
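/* FFBH returns -1 (all ones) when no bit is set; the CNDGE below
 * keeps that value, matching TGSI [IU]MSB semantics. */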
6370 /* result = t1 >= 0 ? t2 : t1 */
6371 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6372 alu.op = ALU_OP3_CNDGE_INT;
6373 alu.is_op3 = 1;
6374 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6375 alu.dst.chan = i;
6376 alu.dst.write = 1;
6377 alu.last = i == last_inst;
6378
6379 alu.src[0].sel = t1;
6380 alu.src[0].chan = i;
6381 alu.src[1].sel = t2;
6382 alu.src[1].chan = i;
6383 alu.src[2].sel = t1;
6384 alu.src[2].chan = i;
6385
6386 r = r600_bytecode_add_alu(ctx->bc, &alu);
6387 if (r)
6388 return r;
6389 }
6390
6391 return 0;
6392 }
6393
6394 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
6395 {
6396 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6397 struct r600_bytecode_alu alu;
6398 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
6399 unsigned location;
6400 const int input = inst->Src[0].Register.Index + ctx->shader->nsys_inputs;
6401
6402 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
6403
6404 /* Interpolators have been marked for use already by allocate_system_value_inputs */
6405 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6406 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6407 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
6408 }
6409 else {
6410 location = TGSI_INTERPOLATE_LOC_CENTROID;
6411 }
6412
6413 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
6414 if (k < 0)
6415 k = 0;
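/* each interpolator GPR packs two i/j pairs: pair 0 in xy, pair 1 in zw */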
6416 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
6417 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
6418
6419 /* NOTE: currently offset is not perspective correct */
6420 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6421 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6422 int sample_gpr = -1;
6423 int gradientsH, gradientsV;
6424 struct r600_bytecode_tex tex;
6425
6426 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6427 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
6428 }
6429
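/* Re-evaluate the i/j barycentrics at the offset position: fetch
 * their H/V gradients, then compute
 * ij' = ij + d(ij)/dx * ox + d(ij)/dy * oy
 * with the two MULADD passes below. */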
6430 gradientsH = r600_get_temp(ctx);
6431 gradientsV = r600_get_temp(ctx);
6432 for (i = 0; i < 2; i++) {
6433 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6434 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
6435 tex.src_gpr = interp_gpr;
6436 tex.src_sel_x = interp_base_chan + 0;
6437 tex.src_sel_y = interp_base_chan + 1;
6438 tex.src_sel_z = 0;
6439 tex.src_sel_w = 0;
6440 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
6441 tex.dst_sel_x = 0;
6442 tex.dst_sel_y = 1;
6443 tex.dst_sel_z = 7;
6444 tex.dst_sel_w = 7;
6445 tex.inst_mod = 1; // Use per pixel gradient calculation
6446 tex.sampler_id = 0;
6447 tex.resource_id = tex.sampler_id;
6448 r = r600_bytecode_add_tex(ctx->bc, &tex);
6449 if (r)
6450 return r;
6451 }
6452
6453 for (i = 0; i < 2; i++) {
6454 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6455 alu.op = ALU_OP3_MULADD;
6456 alu.is_op3 = 1;
6457 alu.src[0].sel = gradientsH;
6458 alu.src[0].chan = i;
6459 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6460 alu.src[1].sel = sample_gpr;
6461 alu.src[1].chan = 2;
6462 }
6463 else {
6464 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
6465 }
6466 alu.src[2].sel = interp_gpr;
6467 alu.src[2].chan = interp_base_chan + i;
6468 alu.dst.sel = ctx->temp_reg;
6469 alu.dst.chan = i;
6470 alu.last = i == 1;
6471
6472 r = r600_bytecode_add_alu(ctx->bc, &alu);
6473 if (r)
6474 return r;
6475 }
6476
6477 for (i = 0; i < 2; i++) {
6478 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6479 alu.op = ALU_OP3_MULADD;
6480 alu.is_op3 = 1;
6481 alu.src[0].sel = gradientsV;
6482 alu.src[0].chan = i;
6483 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6484 alu.src[1].sel = sample_gpr;
6485 alu.src[1].chan = 3;
6486 }
6487 else {
6488 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
6489 }
6490 alu.src[2].sel = ctx->temp_reg;
6491 alu.src[2].chan = i;
6492 alu.dst.sel = ctx->temp_reg;
6493 alu.dst.chan = i;
6494 alu.last = i == 1;
6495
6496 r = r600_bytecode_add_alu(ctx->bc, &alu);
6497 if (r)
6498 return r;
6499 }
6500 }
6501
6502 tmp = r600_get_temp(ctx);
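/* Eight co-issued INTERP ops: INTERP_ZW (i = 0..3) produces the z/w
 * channels and INTERP_XY (i = 4..7) the x/y channels; consecutive
 * slots consume the two barycentric channels, and only the live
 * results (i = 2..5, i.e. tmp.zw then tmp.xy) are written. */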
6503 for (i = 0; i < 8; i++) {
6504 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6505 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
6506
6507 alu.dst.sel = tmp;
6508 if ((i > 1 && i < 6)) {
6509 alu.dst.write = 1;
6510 }
6511 else {
6512 alu.dst.write = 0;
6513 }
6514 alu.dst.chan = i % 4;
6515
6516 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
6517 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
6518 alu.src[0].sel = ctx->temp_reg;
6519 alu.src[0].chan = 1 - (i % 2);
6520 } else {
6521 alu.src[0].sel = interp_gpr;
6522 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
6523 }
6524 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
6525 alu.src[1].chan = 0;
6526
6527 alu.last = i % 4 == 3;
6528 alu.bank_swizzle_force = SQ_ALU_VEC_210;
6529
6530 r = r600_bytecode_add_alu(ctx->bc, &alu);
6531 if (r)
6532 return r;
6533 }
6534
6535 // INTERP can't swizzle dst
6536 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6537 for (i = 0; i <= lasti; i++) {
6538 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6539 continue;
6540
6541 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6542 alu.op = ALU_OP1_MOV;
6543 alu.src[0].sel = tmp;
6544 alu.src[0].chan = ctx->src[0].swizzle[i];
6545 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6546 alu.dst.write = 1;
6547 alu.last = i == lasti;
6548 r = r600_bytecode_add_alu(ctx->bc, &alu);
6549 if (r)
6550 return r;
6551 }
6552
6553 return 0;
6554 }
6555
6556
6557 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
6558 {
6559 struct r600_bytecode_alu alu;
6560 int i, r;
6561
6562 for (i = 0; i < 4; i++) {
6563 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6564 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
6565 alu.op = ALU_OP0_NOP;
6566 alu.dst.chan = i;
6567 } else {
6568 alu.op = ALU_OP1_MOV;
6569 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6570 alu.src[0].sel = ctx->temp_reg;
6571 alu.src[0].chan = i;
6572 }
6573 if (i == 3) {
6574 alu.last = 1;
6575 }
6576 r = r600_bytecode_add_alu(ctx->bc, &alu);
6577 if (r)
6578 return r;
6579 }
6580 return 0;
6581 }
6582
6583 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
6584 unsigned temp, int chan,
6585 struct r600_bytecode_alu_src *bc_src,
6586 const struct r600_shader_src *shader_src)
6587 {
6588 struct r600_bytecode_alu alu;
6589 int r;
6590
6591 r600_bytecode_src(bc_src, shader_src, chan);
6592
6593 /* op3 operands don't support abs modifier */
6594 if (bc_src->abs) {
6595 assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
6596 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6597 alu.op = ALU_OP1_MOV;
6598 alu.dst.sel = temp;
6599 alu.dst.chan = chan;
6600 alu.dst.write = 1;
6601
6602 alu.src[0] = *bc_src;
6603 alu.last = true; // sufficient?
6604 r = r600_bytecode_add_alu(ctx->bc, &alu);
6605 if (r)
6606 return r;
6607
6608 memset(bc_src, 0, sizeof(*bc_src));
6609 bc_src->sel = temp;
6610 bc_src->chan = chan;
6611 }
6612 return 0;
6613 }
6614
6615 static int tgsi_op3(struct r600_shader_ctx *ctx)
6616 {
6617 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6618 struct r600_bytecode_alu alu;
6619 int i, j, r;
6620 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6621 int temp_regs[4];
6622 unsigned op = ctx->inst_info->op;
6623
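/* the IEEE variant yields 0 * Inf = NaN; with MUL_ZERO_WINS the
 * shader wants 0 * anything = 0, which legacy MULADD provides */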
6624 if (op == ALU_OP3_MULADD_IEEE &&
6625 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
6626 op = ALU_OP3_MULADD;
6627
6628 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6629 temp_regs[j] = 0;
6630 if (ctx->src[j].abs)
6631 temp_regs[j] = r600_get_temp(ctx);
6632 }
6633 for (i = 0; i < lasti + 1; i++) {
6634 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6635 continue;
6636
6637 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6638 alu.op = op;
6639 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6640 r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
6641 if (r)
6642 return r;
6643 }
6644
6645 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6646 alu.dst.chan = i;
6647 alu.dst.write = 1;
6648 alu.is_op3 = 1;
6649 if (i == lasti) {
6650 alu.last = 1;
6651 }
6652 r = r600_bytecode_add_alu(ctx->bc, &alu);
6653 if (r)
6654 return r;
6655 }
6656 return 0;
6657 }
6658
6659 static int tgsi_dp(struct r600_shader_ctx *ctx)
6660 {
6661 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6662 struct r600_bytecode_alu alu;
6663 int i, j, r;
6664 unsigned op = ctx->inst_info->op;
6665 if (op == ALU_OP2_DOT4_IEEE &&
6666 ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS])
6667 op = ALU_OP2_DOT4;
6668
6669 for (i = 0; i < 4; i++) {
6670 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6671 alu.op = op;
6672 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6673 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
6674 }
6675
6676 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6677 alu.dst.chan = i;
6678 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
6679 /* DP2/DP3: zero the unused upper channels so the four-lane dot product reduces to two or three terms */
6680 switch (inst->Instruction.Opcode) {
6681 case TGSI_OPCODE_DP2:
6682 if (i > 1) {
6683 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
6684 alu.src[0].chan = alu.src[1].chan = 0;
6685 }
6686 break;
6687 case TGSI_OPCODE_DP3:
6688 if (i > 2) {
6689 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
6690 alu.src[0].chan = alu.src[1].chan = 0;
6691 }
6692 break;
6693 default:
6694 break;
6695 }
6696 if (i == 3) {
6697 alu.last = 1;
6698 }
6699 r = r600_bytecode_add_alu(ctx->bc, &alu);
6700 if (r)
6701 return r;
6702 }
6703 return 0;
6704 }
6705
6706 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
6707 unsigned index)
6708 {
6709 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6710 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
6711 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
6712 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
6713 ctx->src[index].neg || ctx->src[index].abs ||
6714 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == PIPE_SHADER_GEOMETRY);
6715 }
6716
6717 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
6718 unsigned index)
6719 {
6720 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6721 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
6722 }
6723
6724 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
6725 {
6726 struct r600_bytecode_vtx vtx;
6727 struct r600_bytecode_alu alu;
6728 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6729 int src_gpr, r, i;
6730 int id = tgsi_tex_get_src_gpr(ctx, 1);
6731
6732 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
6733 if (src_requires_loading) {
6734 for (i = 0; i < 4; i++) {
6735 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6736 alu.op = ALU_OP1_MOV;
6737 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6738 alu.dst.sel = ctx->temp_reg;
6739 alu.dst.chan = i;
6740 if (i == 3)
6741 alu.last = 1;
6742 alu.dst.write = 1;
6743 r = r600_bytecode_add_alu(ctx->bc, &alu);
6744 if (r)
6745 return r;
6746 }
6747 src_gpr = ctx->temp_reg;
6748 }
6749
6750 memset(&vtx, 0, sizeof(vtx));
6751 vtx.op = FETCH_OP_VFETCH;
6752 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
6753 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
6754 vtx.src_gpr = src_gpr;
6755 vtx.mega_fetch_count = 16;
6756 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6757 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
6758 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
6759 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
6760 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
6761 vtx.use_const_fields = 1;
6762
6763 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
6764 return r;
6765
6766 if (ctx->bc->chip_class >= EVERGREEN)
6767 return 0;
6768
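/* Pre-Evergreen parts cannot fully apply the format swizzle in the
 * fetch itself, so post-process with the buffer-info constants: AND
 * each written component with the per-buffer mask word, then (below)
 * OR the alpha-default word into W so formats without alpha still
 * return 1 there. */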
6769 for (i = 0; i < 4; i++) {
6770 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6771 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6772 continue;
6773
6774 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6775 alu.op = ALU_OP2_AND_INT;
6776
6777 alu.dst.chan = i;
6778 alu.dst.sel = vtx.dst_gpr;
6779 alu.dst.write = 1;
6780
6781 alu.src[0].sel = vtx.dst_gpr;
6782 alu.src[0].chan = i;
6783
6784 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
6785 alu.src[1].sel += (id * 2);
6786 alu.src[1].chan = i % 4;
6787 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6788
6789 if (i == lasti)
6790 alu.last = 1;
6791 r = r600_bytecode_add_alu(ctx->bc, &alu);
6792 if (r)
6793 return r;
6794 }
6795
6796 if (inst->Dst[0].Register.WriteMask & 3) {
6797 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6798 alu.op = ALU_OP2_OR_INT;
6799
6800 alu.dst.chan = 3;
6801 alu.dst.sel = vtx.dst_gpr;
6802 alu.dst.write = 1;
6803
6804 alu.src[0].sel = vtx.dst_gpr;
6805 alu.src[0].chan = 3;
6806
6807 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
6808 alu.src[1].chan = 0;
6809 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6810
6811 alu.last = 1;
6812 r = r600_bytecode_add_alu(ctx->bc, &alu);
6813 if (r)
6814 return r;
6815 }
6816 return 0;
6817 }
6818
6819 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset)
6820 {
6821 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6822 struct r600_bytecode_alu alu;
6823 int r;
6824 int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset;
6825
6826 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6827 alu.op = ALU_OP1_MOV;
6828 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
6829 if (ctx->bc->chip_class >= EVERGREEN) {
6830 /* channel 0 or 2 of each word */
6831 alu.src[0].sel += (id / 2);
6832 alu.src[0].chan = (id % 2) * 2;
6833 } else {
6834 /* on r600 it's in the second channel (chan 1) of the second dword */
6835 alu.src[0].sel += (id * 2) + 1;
6836 alu.src[0].chan = 1;
6837 }
6838 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6839 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
6840 alu.last = 1;
6841 r = r600_bytecode_add_alu(ctx->bc, &alu);
6842 if (r)
6843 return r;
6844 return 0;
6845 }
6846
6847 static int tgsi_tex(struct r600_shader_ctx *ctx)
6848 {
6849 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6850 struct r600_bytecode_tex tex;
6851 struct r600_bytecode_alu alu;
6852 unsigned src_gpr;
6853 int r, i, j;
6854 int opcode;
6855 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
6856 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
6857 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
6858 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
6859
6860 bool txf_add_offsets = inst->Texture.NumOffsets &&
6861 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
6862 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
6863
6864 /* Texture fetch instructions can only use GPRs as sources;
6865 * they also cannot negate a source or take its absolute value */
6866 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
6867 tgsi_tex_src_requires_loading(ctx, 0)) ||
6868 read_compressed_msaa || txf_add_offsets;
6869
6870 boolean src_loaded = FALSE;
6871 unsigned sampler_src_reg = 1;
6872 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
6873 boolean has_txq_cube_array_z = false;
6874 unsigned sampler_index_mode;
6875
6876 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
6877 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6878 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
6879 if (inst->Dst[0].Register.WriteMask & 4) {
6880 ctx->shader->has_txq_cube_array_z_comp = true;
6881 has_txq_cube_array_z = true;
6882 }
6883
6884 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
6885 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
6886 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
6887 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
6888 sampler_src_reg = 2;
6889
6890 /* TGSI moves the sampler to src reg 3 for TXD */
6891 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
6892 sampler_src_reg = 3;
6893
6894 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
6895
6896 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
6897
6898 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
6899 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
6900 ctx->shader->uses_tex_buffers = true;
6901 return r600_do_buffer_txq(ctx, 1, 0);
6902 }
6903 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
6904 if (ctx->bc->chip_class < EVERGREEN)
6905 ctx->shader->uses_tex_buffers = true;
6906 return do_vtx_fetch_inst(ctx, src_requires_loading);
6907 }
6908 }
6909
6910 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
6911 int out_chan;
6912 /* Add perspective divide */
6913 if (ctx->bc->chip_class == CAYMAN) {
6914 out_chan = 2;
6915 for (i = 0; i < 3; i++) {
6916 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6917 alu.op = ALU_OP1_RECIP_IEEE;
6918 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
6919
6920 alu.dst.sel = ctx->temp_reg;
6921 alu.dst.chan = i;
6922 if (i == 2)
6923 alu.last = 1;
6924 if (out_chan == i)
6925 alu.dst.write = 1;
6926 r = r600_bytecode_add_alu(ctx->bc, &alu);
6927 if (r)
6928 return r;
6929 }
6930
6931 } else {
6932 out_chan = 3;
6933 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6934 alu.op = ALU_OP1_RECIP_IEEE;
6935 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
6936
6937 alu.dst.sel = ctx->temp_reg;
6938 alu.dst.chan = out_chan;
6939 alu.last = 1;
6940 alu.dst.write = 1;
6941 r = r600_bytecode_add_alu(ctx->bc, &alu);
6942 if (r)
6943 return r;
6944 }
6945
6946 for (i = 0; i < 3; i++) {
6947 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6948 alu.op = ALU_OP2_MUL;
6949 alu.src[0].sel = ctx->temp_reg;
6950 alu.src[0].chan = out_chan;
6951 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6952 alu.dst.sel = ctx->temp_reg;
6953 alu.dst.chan = i;
6954 alu.dst.write = 1;
6955 r = r600_bytecode_add_alu(ctx->bc, &alu);
6956 if (r)
6957 return r;
6958 }
6959 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6960 alu.op = ALU_OP1_MOV;
6961 alu.src[0].sel = V_SQ_ALU_SRC_1;
6962 alu.src[0].chan = 0;
6963 alu.dst.sel = ctx->temp_reg;
6964 alu.dst.chan = 3;
6965 alu.last = 1;
6966 alu.dst.write = 1;
6967 r = r600_bytecode_add_alu(ctx->bc, &alu);
6968 if (r)
6969 return r;
6970 src_loaded = TRUE;
6971 src_gpr = ctx->temp_reg;
6972 }
6973
6974
6975 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
6976 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6977 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6978 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
6979 inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
6980
6981 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
6982 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
6983
6984 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
6985 for (i = 0; i < 4; i++) {
6986 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6987 alu.op = ALU_OP2_CUBE;
6988 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
6989 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
6990 alu.dst.sel = ctx->temp_reg;
6991 alu.dst.chan = i;
6992 if (i == 3)
6993 alu.last = 1;
6994 alu.dst.write = 1;
6995 r = r600_bytecode_add_alu(ctx->bc, &alu);
6996 if (r)
6997 return r;
6998 }
6999
7000 /* tmp1.z = RCP_e(|tmp1.z|) */
7001 if (ctx->bc->chip_class == CAYMAN) {
7002 for (i = 0; i < 3; i++) {
7003 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7004 alu.op = ALU_OP1_RECIP_IEEE;
7005 alu.src[0].sel = ctx->temp_reg;
7006 alu.src[0].chan = 2;
7007 alu.src[0].abs = 1;
7008 alu.dst.sel = ctx->temp_reg;
7009 alu.dst.chan = i;
7010 if (i == 2)
7011 alu.dst.write = 1;
7012 if (i == 2)
7013 alu.last = 1;
7014 r = r600_bytecode_add_alu(ctx->bc, &alu);
7015 if (r)
7016 return r;
7017 }
7018 } else {
7019 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7020 alu.op = ALU_OP1_RECIP_IEEE;
7021 alu.src[0].sel = ctx->temp_reg;
7022 alu.src[0].chan = 2;
7023 alu.src[0].abs = 1;
7024 alu.dst.sel = ctx->temp_reg;
7025 alu.dst.chan = 2;
7026 alu.dst.write = 1;
7027 alu.last = 1;
7028 r = r600_bytecode_add_alu(ctx->bc, &alu);
7029 if (r)
7030 return r;
7031 }
7032
7033 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
7034 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
7035 * MULADD has no write mask, so another temp is needed
7036 */
7037 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7038 alu.op = ALU_OP3_MULADD;
7039 alu.is_op3 = 1;
7040
7041 alu.src[0].sel = ctx->temp_reg;
7042 alu.src[0].chan = 0;
7043 alu.src[1].sel = ctx->temp_reg;
7044 alu.src[1].chan = 2;
7045
7046 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7047 alu.src[2].chan = 0;
7048 alu.src[2].value = u_bitcast_f2u(1.5f);
7049
7050 alu.dst.sel = ctx->temp_reg;
7051 alu.dst.chan = 0;
7052 alu.dst.write = 1;
7053
7054 r = r600_bytecode_add_alu(ctx->bc, &alu);
7055 if (r)
7056 return r;
7057
7058 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7059 alu.op = ALU_OP3_MULADD;
7060 alu.is_op3 = 1;
7061
7062 alu.src[0].sel = ctx->temp_reg;
7063 alu.src[0].chan = 1;
7064 alu.src[1].sel = ctx->temp_reg;
7065 alu.src[1].chan = 2;
7066
7067 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
7068 alu.src[2].chan = 0;
7069 alu.src[2].value = u_bitcast_f2u(1.5f);
7070
7071 alu.dst.sel = ctx->temp_reg;
7072 alu.dst.chan = 1;
7073 alu.dst.write = 1;
7074
7075 alu.last = 1;
7076 r = r600_bytecode_add_alu(ctx->bc, &alu);
7077 if (r)
7078 return r;
7079 /* write initial compare value into Z component
7080 - W src 0 for shadow cube
7081 - X src 1 for shadow cube array */
7082 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7083 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7084 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7085 alu.op = ALU_OP1_MOV;
7086 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
7087 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7088 else
7089 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7090 alu.dst.sel = ctx->temp_reg;
7091 alu.dst.chan = 2;
7092 alu.dst.write = 1;
7093 alu.last = 1;
7094 r = r600_bytecode_add_alu(ctx->bc, &alu);
7095 if (r)
7096 return r;
7097 }
7098
7099 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7100 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7101 if (ctx->bc->chip_class >= EVERGREEN) {
7102 int mytmp = r600_get_temp(ctx);
7103 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7104 alu.op = ALU_OP1_MOV;
7105 alu.src[0].sel = ctx->temp_reg;
7106 alu.src[0].chan = 3;
7107 alu.dst.sel = mytmp;
7108 alu.dst.chan = 0;
7109 alu.dst.write = 1;
7110 alu.last = 1;
7111 r = r600_bytecode_add_alu(ctx->bc, &alu);
7112 if (r)
7113 return r;
7114
7115 /* multiply the original layer by 8 and add the face id; the result lands in temp.w, which the cube swizzle routes to the Z coordinate */
7116 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7117 alu.op = ALU_OP3_MULADD;
7118 alu.is_op3 = 1;
7119 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7120 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7121 alu.src[1].chan = 0;
7122 alu.src[1].value = u_bitcast_f2u(8.0f);
7123 alu.src[2].sel = mytmp;
7124 alu.src[2].chan = 0;
7125 alu.dst.sel = ctx->temp_reg;
7126 alu.dst.chan = 3;
7127 alu.dst.write = 1;
7128 alu.last = 1;
7129 r = r600_bytecode_add_alu(ctx->bc, &alu);
7130 if (r)
7131 return r;
7132 } else if (ctx->bc->chip_class < EVERGREEN) {
7133 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7134 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
7135 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7136 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7137 tex.src_gpr = r600_get_temp(ctx);
7138 tex.src_sel_x = 0;
7139 tex.src_sel_y = 0;
7140 tex.src_sel_z = 0;
7141 tex.src_sel_w = 0;
7142 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7143 tex.coord_type_x = 1;
7144 tex.coord_type_y = 1;
7145 tex.coord_type_z = 1;
7146 tex.coord_type_w = 1;
7147 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7148 alu.op = ALU_OP1_MOV;
7149 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7150 alu.dst.sel = tex.src_gpr;
7151 alu.dst.chan = 0;
7152 alu.last = 1;
7153 alu.dst.write = 1;
7154 r = r600_bytecode_add_alu(ctx->bc, &alu);
7155 if (r)
7156 return r;
7157
7158 r = r600_bytecode_add_tex(ctx->bc, &tex);
7159 if (r)
7160 return r;
7161 }
7162
7163 }
7164
7165 /* for cube forms of lod and bias we need to route things */
7166 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
7167 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
7168 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7169 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
7170 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7171 alu.op = ALU_OP1_MOV;
7172 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
7173 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
7174 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
7175 else
7176 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
7177 alu.dst.sel = ctx->temp_reg;
7178 alu.dst.chan = 2;
7179 alu.last = 1;
7180 alu.dst.write = 1;
7181 r = r600_bytecode_add_alu(ctx->bc, &alu);
7182 if (r)
7183 return r;
7184 }
7185
7186 src_loaded = TRUE;
7187 src_gpr = ctx->temp_reg;
7188 }
7189
7190 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
7191 int temp_h = 0, temp_v = 0;
7192 int start_val = 0;
7193
7194 /* if we've already loaded the src (e.g. for CUBE), don't reload it */
7195 if (src_loaded == TRUE)
7196 start_val = 1;
7197 else
7198 src_loaded = TRUE;
7199 for (i = start_val; i < 3; i++) {
7200 int treg = r600_get_temp(ctx);
7201
7202 if (i == 0)
7203 src_gpr = treg;
7204 else if (i == 1)
7205 temp_h = treg;
7206 else
7207 temp_v = treg;
7208
7209 for (j = 0; j < 4; j++) {
7210 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7211 alu.op = ALU_OP1_MOV;
7212 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
7213 alu.dst.sel = treg;
7214 alu.dst.chan = j;
7215 if (j == 3)
7216 alu.last = 1;
7217 alu.dst.write = 1;
7218 r = r600_bytecode_add_alu(ctx->bc, &alu);
7219 if (r)
7220 return r;
7221 }
7222 }
7223 for (i = 1; i < 3; i++) {
7224 /* set gradients h/v */
7225 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7226 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
7227 FETCH_OP_SET_GRADIENTS_V;
7228 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7229 tex.sampler_index_mode = sampler_index_mode;
7230 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7231 tex.resource_index_mode = sampler_index_mode;
7232
7233 tex.src_gpr = (i == 1) ? temp_h : temp_v;
7234 tex.src_sel_x = 0;
7235 tex.src_sel_y = 1;
7236 tex.src_sel_z = 2;
7237 tex.src_sel_w = 3;
7238
7239 tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
7240 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
7241 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
7242 tex.coord_type_x = 1;
7243 tex.coord_type_y = 1;
7244 tex.coord_type_z = 1;
7245 tex.coord_type_w = 1;
7246 }
7247 r = r600_bytecode_add_tex(ctx->bc, &tex);
7248 if (r)
7249 return r;
7250 }
7251 }
7252
7253 if (src_requires_loading && !src_loaded) {
7254 for (i = 0; i < 4; i++) {
7255 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7256 alu.op = ALU_OP1_MOV;
7257 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7258 alu.dst.sel = ctx->temp_reg;
7259 alu.dst.chan = i;
7260 if (i == 3)
7261 alu.last = 1;
7262 alu.dst.write = 1;
7263 r = r600_bytecode_add_alu(ctx->bc, &alu);
7264 if (r)
7265 return r;
7266 }
7267 src_loaded = TRUE;
7268 src_gpr = ctx->temp_reg;
7269 }
7270
7271 /* get offset values */
7272 if (inst->Texture.NumOffsets) {
7273 assert(inst->Texture.NumOffsets == 1);
7274
7275 /* The texture offset feature doesn't work with the TXF instruction
7276 * and must be emulated by adding the offset to the texture coordinates. */
7277 if (txf_add_offsets) {
7278 const struct tgsi_texture_offset *off = inst->TexOffsets;
7279
7280 switch (inst->Texture.Texture) {
7281 case TGSI_TEXTURE_3D:
7282 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7283 alu.op = ALU_OP2_ADD_INT;
7284 alu.src[0].sel = src_gpr;
7285 alu.src[0].chan = 2;
7286 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7287 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
7288 alu.dst.sel = src_gpr;
7289 alu.dst.chan = 2;
7290 alu.dst.write = 1;
7291 alu.last = 1;
7292 r = r600_bytecode_add_alu(ctx->bc, &alu);
7293 if (r)
7294 return r;
7295 /* fall through */
7296
7297 case TGSI_TEXTURE_2D:
7298 case TGSI_TEXTURE_SHADOW2D:
7299 case TGSI_TEXTURE_RECT:
7300 case TGSI_TEXTURE_SHADOWRECT:
7301 case TGSI_TEXTURE_2D_ARRAY:
7302 case TGSI_TEXTURE_SHADOW2D_ARRAY:
7303 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7304 alu.op = ALU_OP2_ADD_INT;
7305 alu.src[0].sel = src_gpr;
7306 alu.src[0].chan = 1;
7307 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7308 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
7309 alu.dst.sel = src_gpr;
7310 alu.dst.chan = 1;
7311 alu.dst.write = 1;
7312 alu.last = 1;
7313 r = r600_bytecode_add_alu(ctx->bc, &alu);
7314 if (r)
7315 return r;
7316 /* fall through */
7317
7318 case TGSI_TEXTURE_1D:
7319 case TGSI_TEXTURE_SHADOW1D:
7320 case TGSI_TEXTURE_1D_ARRAY:
7321 case TGSI_TEXTURE_SHADOW1D_ARRAY:
7322 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7323 alu.op = ALU_OP2_ADD_INT;
7324 alu.src[0].sel = src_gpr;
7325 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7326 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
7327 alu.dst.sel = src_gpr;
7328 alu.dst.write = 1;
7329 alu.last = 1;
7330 r = r600_bytecode_add_alu(ctx->bc, &alu);
7331 if (r)
7332 return r;
7333 break;
7334 /* texture offsets do not apply to other texture targets */
7335 }
7336 } else {
7337 switch (inst->Texture.Texture) {
7338 case TGSI_TEXTURE_3D:
7339 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
7340 /* fallthrough */
7341 case TGSI_TEXTURE_2D:
7342 case TGSI_TEXTURE_SHADOW2D:
7343 case TGSI_TEXTURE_RECT:
7344 case TGSI_TEXTURE_SHADOWRECT:
7345 case TGSI_TEXTURE_2D_ARRAY:
7346 case TGSI_TEXTURE_SHADOW2D_ARRAY:
7347 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
7348 /* fallthrough */
7349 case TGSI_TEXTURE_1D:
7350 case TGSI_TEXTURE_SHADOW1D:
7351 case TGSI_TEXTURE_1D_ARRAY:
7352 case TGSI_TEXTURE_SHADOW1D_ARRAY:
7353 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
7354 }
7355 }
7356 }
7357
7358 /* Obtain the sample index for reading a compressed MSAA color texture.
7359 * To read the FMASK, we use the ldfptr instruction, which tells us
7360 * where the samples are stored.
7361 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
7362 * which is the identity mapping. Each nibble says which physical sample
7363 * should be fetched to get that sample.
7364 *
7365 * Assume src.z contains the sample index. It should be modified like this:
7366 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
7367 * Then fetch the texel with src.
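* For example, with FMASK 0x76543210 and src.z = 2:
* (0x76543210 >> 8) & 0xF = 2.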
7368 */
7369 if (read_compressed_msaa) {
7370 unsigned sample_chan = 3;
7371 unsigned temp = r600_get_temp(ctx);
7372 assert(src_loaded);
7373
7374 /* temp.w = ldfptr() */
7375 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7376 tex.op = FETCH_OP_LD;
7377 tex.inst_mod = 1; /* to indicate this is ldfptr */
7378 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7379 tex.sampler_index_mode = sampler_index_mode;
7380 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7381 tex.resource_index_mode = sampler_index_mode;
7382 tex.src_gpr = src_gpr;
7383 tex.dst_gpr = temp;
7384 tex.dst_sel_x = 7; /* mask out these components */
7385 tex.dst_sel_y = 7;
7386 tex.dst_sel_z = 7;
7387 tex.dst_sel_w = 0; /* store X */
7388 tex.src_sel_x = 0;
7389 tex.src_sel_y = 1;
7390 tex.src_sel_z = 2;
7391 tex.src_sel_w = 3;
7392 tex.offset_x = offset_x;
7393 tex.offset_y = offset_y;
7394 tex.offset_z = offset_z;
7395 r = r600_bytecode_add_tex(ctx->bc, &tex);
7396 if (r)
7397 return r;
7398
7399 /* temp.x = sample_index*4 */
7400 if (ctx->bc->chip_class == CAYMAN) {
7401 for (i = 0 ; i < 4; i++) {
7402 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7403 alu.op = ALU_OP2_MULLO_INT;
7404 alu.src[0].sel = src_gpr;
7405 alu.src[0].chan = sample_chan;
7406 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7407 alu.src[1].value = 4;
7408 alu.dst.sel = temp;
7409 alu.dst.chan = i;
7410 alu.dst.write = i == 0;
7411 if (i == 3)
7412 alu.last = 1;
7413 r = r600_bytecode_add_alu(ctx->bc, &alu);
7414 if (r)
7415 return r;
7416 }
7417 } else {
7418 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7419 alu.op = ALU_OP2_MULLO_INT;
7420 alu.src[0].sel = src_gpr;
7421 alu.src[0].chan = sample_chan;
7422 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7423 alu.src[1].value = 4;
7424 alu.dst.sel = temp;
7425 alu.dst.chan = 0;
7426 alu.dst.write = 1;
7427 alu.last = 1;
7428 r = r600_bytecode_add_alu(ctx->bc, &alu);
7429 if (r)
7430 return r;
7431 }
7432
7433 /* sample_index = temp.w >> temp.x */
7434 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7435 alu.op = ALU_OP2_LSHR_INT;
7436 alu.src[0].sel = temp;
7437 alu.src[0].chan = 3;
7438 alu.src[1].sel = temp;
7439 alu.src[1].chan = 0;
7440 alu.dst.sel = src_gpr;
7441 alu.dst.chan = sample_chan;
7442 alu.dst.write = 1;
7443 alu.last = 1;
7444 r = r600_bytecode_add_alu(ctx->bc, &alu);
7445 if (r)
7446 return r;
7447
7448 /* sample_index & 0xF */
7449 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7450 alu.op = ALU_OP2_AND_INT;
7451 alu.src[0].sel = src_gpr;
7452 alu.src[0].chan = sample_chan;
7453 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
7454 alu.src[1].value = 0xF;
7455 alu.dst.sel = src_gpr;
7456 alu.dst.chan = sample_chan;
7457 alu.dst.write = 1;
7458 alu.last = 1;
7459 r = r600_bytecode_add_alu(ctx->bc, &alu);
7460 if (r)
7461 return r;
7462 #if 0
7463 /* visualize the FMASK */
7464 for (i = 0; i < 4; i++) {
7465 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7466 alu.op = ALU_OP1_INT_TO_FLT;
7467 alu.src[0].sel = src_gpr;
7468 alu.src[0].chan = sample_chan;
7469 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7470 alu.dst.chan = i;
7471 alu.dst.write = 1;
7472 alu.last = 1;
7473 r = r600_bytecode_add_alu(ctx->bc, &alu);
7474 if (r)
7475 return r;
7476 }
7477 return 0;
7478 #endif
7479 }
7480
7481 /* does this shader want a num layers from TXQ for a cube array? */
7482 if (has_txq_cube_array_z) {
7483 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7484
7485 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7486 alu.op = ALU_OP1_MOV;
7487
7488 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
7489 if (ctx->bc->chip_class >= EVERGREEN) {
7490 /* channel 1 or 3 of each word */
7491 alu.src[0].sel += (id / 2);
7492 alu.src[0].chan = ((id % 2) * 2) + 1;
7493 } else {
7494 /* on r600 they are at channel 2 of the second dword */
7495 alu.src[0].sel += (id * 2) + 1;
7496 alu.src[0].chan = 2;
7497 }
7498 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
7499 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
7500 alu.last = 1;
7501 r = r600_bytecode_add_alu(ctx->bc, &alu);
7502 if (r)
7503 return r;
7504 /* disable writemask from texture instruction */
7505 inst->Dst[0].Register.WriteMask &= ~4;
7506 }
7507
7508 opcode = ctx->inst_info->op;
7509 if (opcode == FETCH_OP_GATHER4 &&
7510 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
7511 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
7512 opcode = FETCH_OP_GATHER4_O;
7513
7514 /* GATHER4_O/GATHER4_C_O use offset values loaded by
7515 SET_TEXTURE_OFFSETS instruction. The immediate offset values
7516 encoded in the instruction are ignored. */
7517 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7518 tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
7519 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7520 tex.sampler_index_mode = sampler_index_mode;
7521 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7522 tex.resource_index_mode = sampler_index_mode;
7523
7524 tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
7525 tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
7526 tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
7527 tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
7528 tex.src_sel_w = 4;
7529
7530 tex.dst_sel_x = 7;
7531 tex.dst_sel_y = 7;
7532 tex.dst_sel_z = 7;
7533 tex.dst_sel_w = 7;
7534
7535 r = r600_bytecode_add_tex(ctx->bc, &tex);
7536 if (r)
7537 return r;
7538 }
7539
7540 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
7541 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7542 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
7543 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7544 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
7545 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
7546 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7547 switch (opcode) {
7548 case FETCH_OP_SAMPLE:
7549 opcode = FETCH_OP_SAMPLE_C;
7550 break;
7551 case FETCH_OP_SAMPLE_L:
7552 opcode = FETCH_OP_SAMPLE_C_L;
7553 break;
7554 case FETCH_OP_SAMPLE_LB:
7555 opcode = FETCH_OP_SAMPLE_C_LB;
7556 break;
7557 case FETCH_OP_SAMPLE_G:
7558 opcode = FETCH_OP_SAMPLE_C_G;
7559 break;
7560 /* Texture gather variants */
7561 case FETCH_OP_GATHER4:
7562 opcode = FETCH_OP_GATHER4_C;
7563 break;
7564 case FETCH_OP_GATHER4_O:
7565 opcode = FETCH_OP_GATHER4_C_O;
7566 break;
7567 }
7568 }
7569
7570 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
7571 tex.op = opcode;
7572
7573 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
7574 tex.sampler_index_mode = sampler_index_mode;
7575 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
7576 tex.resource_index_mode = sampler_index_mode;
7577 tex.src_gpr = src_gpr;
7578 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7579
7580 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
7581 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
7582 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
7583 }
7584
7585 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
7586 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
7587 tex.inst_mod = texture_component_select;
7588
7589 if (ctx->bc->chip_class == CAYMAN) {
7590 /* GATHER4 result order is different from TGSI TG4 */
7591 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
7592 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
7593 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
7594 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7595 } else {
7596 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7597 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
7598 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7599 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7600 }
7601 }
7602 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
7603 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7604 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7605 tex.dst_sel_z = 7;
7606 tex.dst_sel_w = 7;
7607 }
7608 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
7609 tex.dst_sel_x = 3;
7610 tex.dst_sel_y = 7;
7611 tex.dst_sel_z = 7;
7612 tex.dst_sel_w = 7;
7613 }
7614 else {
7615 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
7616 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
7617 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
7618 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
7619 }
7620
7621
7622 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
7623 tex.src_sel_x = 4;
7624 tex.src_sel_y = 4;
7625 tex.src_sel_z = 4;
7626 tex.src_sel_w = 4;
7627 } else if (src_loaded) {
7628 tex.src_sel_x = 0;
7629 tex.src_sel_y = 1;
7630 tex.src_sel_z = 2;
7631 tex.src_sel_w = 3;
7632 } else {
7633 tex.src_sel_x = ctx->src[0].swizzle[0];
7634 tex.src_sel_y = ctx->src[0].swizzle[1];
7635 tex.src_sel_z = ctx->src[0].swizzle[2];
7636 tex.src_sel_w = ctx->src[0].swizzle[3];
7637 tex.src_rel = ctx->src[0].rel;
7638 }
7639
7640 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
7641 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
7642 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7643 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
7644 tex.src_sel_x = 1;
7645 tex.src_sel_y = 0;
7646 tex.src_sel_z = 3;
7647 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
7648 }
7649
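/* coord_type 1 seems to select normalized coordinates, which is what
 * every target except RECT (unnormalized x/y) wants. */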
7650 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
7651 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
7652 tex.coord_type_x = 1;
7653 tex.coord_type_y = 1;
7654 }
7655 tex.coord_type_z = 1;
7656 tex.coord_type_w = 1;
7657
7658 tex.offset_x = offset_x;
7659 tex.offset_y = offset_y;
7660 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
7661 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7662 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
7663 tex.offset_z = 0;
7664 }
7665 else {
7666 tex.offset_z = offset_z;
7667 }
7668
7669 /* Put the depth for comparison in W.
7670 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
7671 * Some instructions expect the depth in Z. */
7672 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
7673 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
7674 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
7675 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
7676 opcode != FETCH_OP_SAMPLE_C_L &&
7677 opcode != FETCH_OP_SAMPLE_C_LB) {
7678 tex.src_sel_w = tex.src_sel_z;
7679 }
7680
7681 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
7682 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
7683 if (opcode == FETCH_OP_SAMPLE_C_L ||
7684 opcode == FETCH_OP_SAMPLE_C_LB) {
7685 /* the array index is read from Y */
7686 tex.coord_type_y = 0;
7687 } else {
7688 /* the array index is read from Z */
7689 tex.coord_type_z = 0;
7690 tex.src_sel_z = tex.src_sel_y;
7691 }
7692 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
7693 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
7694 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
7695 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
7696 (ctx->bc->chip_class >= EVERGREEN)))
7697 /* the array index is read from Z */
7698 tex.coord_type_z = 0;
7699
7700 /* mask unused source components */
7701 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
7702 switch (inst->Texture.Texture) {
7703 case TGSI_TEXTURE_2D:
7704 case TGSI_TEXTURE_RECT:
7705 tex.src_sel_z = 7;
7706 tex.src_sel_w = 7;
7707 break;
7708 case TGSI_TEXTURE_1D_ARRAY:
7709 tex.src_sel_y = 7;
7710 tex.src_sel_w = 7;
7711 break;
7712 case TGSI_TEXTURE_1D:
7713 tex.src_sel_y = 7;
7714 tex.src_sel_z = 7;
7715 tex.src_sel_w = 7;
7716 break;
7717 }
7718 }
7719
7720 r = r600_bytecode_add_tex(ctx->bc, &tex);
7721 if (r)
7722 return r;
7723
7724 /* add shadow ambient support - gallium doesn't do it yet */
7725 return 0;
7726 }
7727
7728 static int find_hw_atomic_counter(struct r600_shader_ctx *ctx,
7729 struct tgsi_full_src_register *src)
7730 {
7731 int i;
7732
7733 if (src->Register.Indirect) {
7734 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
7735 if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id)
7736 return ctx->shader->atomics[i].hw_idx;
7737 }
7738 } else {
7739 uint32_t index = src->Register.Index;
7740 for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
7741 if (ctx->shader->atomics[i].buffer_id != src->Dimension.Index)
7742 continue;
7743 if (index > ctx->shader->atomics[i].end)
7744 continue;
7745 if (index < ctx->shader->atomics[i].start)
7746 continue;
7747 uint32_t offset = (index - ctx->shader->atomics[i].start);
7748 return ctx->shader->atomics[i].hw_idx + offset;
7749 }
7750 }
7751 assert(0);
7752 return -1;
7753 }
7754
7755
7756 static int tgsi_load_gds(struct r600_shader_ctx *ctx)
7757 {
7758 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7759 int r;
7760 struct r600_bytecode_gds gds;
7761 int uav_id = 0;
7762 int uav_index_mode = 0;
7763
7764 uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]);
7765
7766 if (inst->Src[0].Register.Indirect)
7767 uav_index_mode = 2;
7768
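/* Atomic counters live in GDS: a read is a GDS_READ_RET through the
 * append/consume path (alloc_consume = 1, seemingly selecting uav_id
 * addressing). src_sel 4 = SQ_SEL_0 supplies zero operands and the
 * 32-bit counter value lands in .x of dst_gpr. */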
7769 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
7770 gds.op = FETCH_OP_GDS_READ_RET;
7771 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7772 gds.uav_id = uav_id;
7773 gds.uav_index_mode = uav_index_mode;
7774 gds.src_gpr = ctx->temp_reg;
7775 gds.src_sel_x = 4;
7776 gds.src_sel_y = 4;
7777 gds.src_sel_z = 4;
7778 gds.dst_sel_x = 0;
7779 gds.dst_sel_y = 7;
7780 gds.dst_sel_z = 7;
7781 gds.dst_sel_w = 7;
7782 gds.src_gpr2 = ctx->temp_reg;
7783 gds.alloc_consume = 1;
7784 r = r600_bytecode_add_gds(ctx->bc, &gds);
7785 if (r)
7786 return r;
7787
7788 ctx->bc->cf_last->vpm = 1;
7789 return 0;
7790 }
7791
7792 /* load the memory-op coordinate into a temp, zeroing the components the target doesn't use; this fixes up 1D arrays properly by routing the layer index into .z */
7793 static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr)
7794 {
7795 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7796 int r, i;
7797 struct r600_bytecode_alu alu;
7798 int temp_reg = r600_get_temp(ctx);
7799
7800 for (i = 0; i < 4; i++) {
7801 bool def_val = true, write_zero = false;
7802 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7803 alu.op = ALU_OP1_MOV;
7804 alu.dst.sel = temp_reg;
7805 alu.dst.chan = i;
7806
7807 switch (inst->Memory.Texture) {
7808 case TGSI_TEXTURE_BUFFER:
7809 case TGSI_TEXTURE_1D:
7810 if (i == 1 || i == 2 || i == 3) {
7811 write_zero = true;
7812 }
7813 break;
7814 case TGSI_TEXTURE_1D_ARRAY:
7815 if (i == 1 || i == 3)
7816 write_zero = true;
7817 else if (i == 2) {
7818 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1);
7819 def_val = false;
7820 }
7821 break;
7822 case TGSI_TEXTURE_2D:
7823 if (i == 2 || i == 3)
7824 write_zero = true;
7825 break;
7826 default:
7827 if (i == 3)
7828 write_zero = true;
7829 break;
7830 }
7831
7832 if (write_zero) {
7833 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
7834 alu.src[0].value = 0;
7835 } else if (def_val) {
7836 r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i);
7837 }
7838
7839 if (i == 3)
7840 alu.last = 1;
7841 alu.dst.write = 1;
7842 r = r600_bytecode_add_alu(ctx->bc, &alu);
7843 if (r)
7844 return r;
7845 }
7846 *idx_gpr = temp_reg;
7847 return 0;
7848 }
7849
7850 static int tgsi_load_rat(struct r600_shader_ctx *ctx)
7851 {
7852 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7853 /* have to work out the offset into the RAT immediate return buffer */
7854 struct r600_bytecode_vtx vtx;
7855 struct r600_bytecode_cf *cf;
7856 int r;
7857 int idx_gpr;
7858 unsigned format, num_format, format_comp, endian;
7859 const struct util_format_description *desc;
7860 unsigned rat_index_mode;
7861 unsigned immed_base;
7862
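/* For an indirectly addressed image the RAT id comes from index
 * register 1 (CF_INDEX_1), loaded below via egcm_load_index_reg;
 * otherwise the id is encoded directly in the CF word. */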
7863 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7864
7865 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
7866 r = load_index_src(ctx, 1, &idx_gpr);
7867 if (r)
7868 return r;
7869
7870 if (rat_index_mode)
7871 egcm_load_index_reg(ctx->bc, 1, false);
7872
7873 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
7874 cf = ctx->bc->cf_last;
7875
7876 cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
7877 cf->rat.inst = V_RAT_INST_NOP_RTN;
7878 cf->rat.index_mode = rat_index_mode;
7879 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
7880 cf->output.gpr = ctx->thread_id_gpr;
7881 cf->output.index_gpr = idx_gpr;
7882 cf->output.comp_mask = 0xf;
7883 cf->output.burst_count = 1;
7884 cf->vpm = 1;
7885 cf->barrier = 1;
7886 cf->mark = 1;
7887 cf->output.elem_size = 0;
7888
7889 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
7890 cf = ctx->bc->cf_last;
7891 cf->barrier = 1;
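/* NOP_RTN above only queues the read into the immediate return
 * buffer; WAIT_ACK stalls until the data is back so the VFETCH below
 * can safely read it. */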
7892
7893 desc = util_format_description(inst->Memory.Format);
7894 r600_vertex_data_type(inst->Memory.Format,
7895 &format, &num_format, &format_comp, &endian);
7896 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
7897 vtx.op = FETCH_OP_VFETCH;
7898 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
7899 vtx.buffer_index_mode = rat_index_mode;
7900 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
7901 vtx.src_gpr = ctx->thread_id_gpr;
7902 vtx.src_sel_x = 1;
7903 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
7904 vtx.dst_sel_x = desc->swizzle[0];
7905 vtx.dst_sel_y = desc->swizzle[1];
7906 vtx.dst_sel_z = desc->swizzle[2];
7907 vtx.dst_sel_w = desc->swizzle[3];
7908 vtx.srf_mode_all = 1;
7909 vtx.data_format = format;
7910 vtx.num_format_all = num_format;
7911 vtx.format_comp_all = format_comp;
7912 vtx.endian = endian;
7913 vtx.offset = 0;
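/* mega_fetch_count seems to be the fetch granularity in bytes minus
 * one, i.e. 3 = one dword here (the atomic path uses 0xf = 16 bytes) */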
7914 vtx.mega_fetch_count = 3;
7915 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
7916 if (r)
7917 return r;
7918 cf = ctx->bc->cf_last;
7919 cf->barrier = 1;
7920 return 0;
7921 }
7922
7923 static int tgsi_load(struct r600_shader_ctx *ctx)
7924 {
7925 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7926 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
7927 return tgsi_load_rat(ctx);
7928 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
7929 return tgsi_load_gds(ctx);
7930 return 0;
7931 }
7932
7933 static int tgsi_store_rat(struct r600_shader_ctx *ctx)
7934 {
7935 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7936 struct r600_bytecode_cf *cf;
7937 bool src_requires_loading = false;
7938 int val_gpr, idx_gpr;
7939 int r, i;
7940 unsigned rat_index_mode;
7941
7942 rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
7943
7944 r = load_index_src(ctx, 0, &idx_gpr);
7945 if (r)
7946 return r;
7947
7948 if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY)
7949 src_requires_loading = true;
7950
7951 if (src_requires_loading) {
7952 struct r600_bytecode_alu alu;
7953 for (i = 0; i < 4; i++) {
7954 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7955 alu.op = ALU_OP1_MOV;
7956 alu.dst.sel = ctx->temp_reg;
7957 alu.dst.chan = i;
7958
7959 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
7960 if (i == 3)
7961 alu.last = 1;
7962 alu.dst.write = 1;
7963 r = r600_bytecode_add_alu(ctx->bc, &alu);
7964 if (r)
7965 return r;
7966 }
7967 val_gpr = ctx->temp_reg;
7968 } else
7969 val_gpr = tgsi_tex_get_src_gpr(ctx, 1);
7970 if (rat_index_mode)
7971 egcm_load_index_reg(ctx->bc, 1, false);
7972
7973 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
7974 cf = ctx->bc->cf_last;
7975
7976 cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index;
7977 cf->rat.inst = V_RAT_INST_STORE_TYPED;
7978 cf->rat.index_mode = rat_index_mode;
7979 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
7980 cf->output.gpr = val_gpr;
7981 cf->output.index_gpr = idx_gpr;
7982 cf->output.comp_mask = 0xf;
7983 cf->output.burst_count = 1;
7984 cf->vpm = 1;
7985 cf->barrier = 1;
7986 cf->output.elem_size = 0;
7987 return 0;
7988 }
7989
7990 static int tgsi_store(struct r600_shader_ctx *ctx)
7991 {
7992 return tgsi_store_rat(ctx);
7993 }
7994
7995 static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx)
7996 {
7997 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7998 /* have to work out the offset into the RAT immediate return buffer */
7999 struct r600_bytecode_alu alu;
8000 struct r600_bytecode_vtx vtx;
8001 struct r600_bytecode_cf *cf;
8002 int r;
8003 int idx_gpr;
8004 unsigned format, num_format, format_comp, endian;
8005 const struct util_format_description *desc;
8006 unsigned rat_index_mode;
8007 unsigned immed_base;
8008
8009 immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
8010
8011 assert (inst->Src[0].Register.File == TGSI_FILE_IMAGE);
8012 rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8013
8014 r = load_index_src(ctx, 1, &idx_gpr);
8015 if (r)
8016 return r;
8017
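/* Stage the RAT operands in the export GPR: for CMPXCHG the swap
 * value (TGSI src 3) goes into .x and the compare value (src 2) into
 * .w; every other RAT atomic takes its single operand in .x. */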
8018 if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) {
8019 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8020 alu.op = ALU_OP1_MOV;
8021 alu.dst.sel = ctx->thread_id_gpr;
8022 alu.dst.chan = 0;
8023 alu.dst.write = 1;
8024 r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
8025 alu.last = 1;
8026 r = r600_bytecode_add_alu(ctx->bc, &alu);
8027 if (r)
8028 return r;
8029
8030 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8031 alu.op = ALU_OP1_MOV;
8032 alu.dst.sel = ctx->thread_id_gpr;
8033 alu.dst.chan = 3;
8034 alu.dst.write = 1;
8035 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8036 alu.last = 1;
8037 r = r600_bytecode_add_alu(ctx->bc, &alu);
8038 if (r)
8039 return r;
8040 } else {
8041 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8042 alu.op = ALU_OP1_MOV;
8043 alu.dst.sel = ctx->thread_id_gpr;
8044 alu.dst.chan = 0;
8045 alu.dst.write = 1;
8046 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8047 alu.last = 1;
8048 r = r600_bytecode_add_alu(ctx->bc, &alu);
8049 if (r)
8050 return r;
8051 }
8052
8053 if (rat_index_mode)
8054 egcm_load_index_reg(ctx->bc, 1, false);
8055 r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
8056 cf = ctx->bc->cf_last;
8057
8058 cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
8059 cf->rat.inst = ctx->inst_info->op;
8060 cf->rat.index_mode = rat_index_mode;
8061 cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
8062 cf->output.gpr = ctx->thread_id_gpr;
8063 cf->output.index_gpr = idx_gpr;
8064 cf->output.comp_mask = 0xf;
8065 cf->output.burst_count = 1;
8066 cf->vpm = 1;
8067 cf->barrier = 1;
8068 cf->mark = 1;
8069 cf->output.elem_size = 0;
8070 r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
8071 cf = ctx->bc->cf_last;
8072 cf->barrier = 1;
8073 cf->cf_addr = 1;
8074
8075 desc = util_format_description(inst->Memory.Format);
8076 r600_vertex_data_type(inst->Memory.Format,
8077 &format, &num_format, &format_comp, &endian);
8078 memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
8079 vtx.op = FETCH_OP_VFETCH;
8080 vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
8081 vtx.buffer_index_mode = rat_index_mode;
8082 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
8083 vtx.src_gpr = ctx->thread_id_gpr;
8084 vtx.src_sel_x = 1;
8085 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8086 vtx.dst_sel_x = desc->swizzle[0];
8087 vtx.dst_sel_y = 7;
8088 vtx.dst_sel_z = 7;
8089 vtx.dst_sel_w = 7;
8090 vtx.use_const_fields = 0;
8091 vtx.srf_mode_all = 1;
8092 vtx.data_format = format;
8093 vtx.num_format_all = num_format;
8094 vtx.format_comp_all = format_comp;
8095 vtx.endian = endian;
8096 vtx.offset = 0;
8097 vtx.mega_fetch_count = 0xf;
8098 r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
8099 if (r)
8100 return r;
8101 cf = ctx->bc->cf_last;
8102 cf->vpm = 1;
8103 cf->barrier = 1;
8104 return 0;
8105 }
8106
8107 static int get_gds_op(int opcode)
8108 {
8109 switch (opcode) {
8110 case TGSI_OPCODE_ATOMUADD:
8111 return FETCH_OP_GDS_ADD_RET;
8112 case TGSI_OPCODE_ATOMAND:
8113 return FETCH_OP_GDS_AND_RET;
8114 case TGSI_OPCODE_ATOMOR:
8115 return FETCH_OP_GDS_OR_RET;
8116 case TGSI_OPCODE_ATOMXOR:
8117 return FETCH_OP_GDS_XOR_RET;
8118 case TGSI_OPCODE_ATOMUMIN:
8119 return FETCH_OP_GDS_MIN_UINT_RET;
8120 case TGSI_OPCODE_ATOMUMAX:
8121 return FETCH_OP_GDS_MAX_UINT_RET;
8122 case TGSI_OPCODE_ATOMXCHG:
8123 return FETCH_OP_GDS_XCHG_RET;
8124 case TGSI_OPCODE_ATOMCAS:
8125 return FETCH_OP_GDS_CMP_XCHG_RET;
8126 default:
8127 return -1;
8128 }
8129 }
8130
8131 static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx)
8132 {
8133 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8134 struct r600_bytecode_gds gds;
8135 struct r600_bytecode_alu alu;
8136 int gds_op = get_gds_op(inst->Instruction.Opcode);
8137 int r;
8138 int uav_id = 0;
8139 int uav_index_mode = 0;
8140
8141 if (gds_op == -1) {
8142 fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode);
8143 return -1;
8144 }
8145
8146 uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]);
8147
8148 if (inst->Src[0].Register.Indirect)
8149 uav_index_mode = 2;
8150
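/* The GDS add seemingly treats its operand as unsigned, so a negative
 * immediate is emitted as a SUB of its absolute value instead. */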
8151 if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) {
8152 int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]);
8153 int abs_value = abs(value);
8154 if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET)
8155 gds_op = FETCH_OP_GDS_SUB_RET;
8156 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8157 alu.op = ALU_OP1_MOV;
8158 alu.dst.sel = ctx->temp_reg;
8159 alu.dst.chan = 0;
8160 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
8161 alu.src[0].value = abs_value;
8162 alu.last = 1;
8163 alu.dst.write = 1;
8164 r = r600_bytecode_add_alu(ctx->bc, &alu);
8165 if (r)
8166 return r;
8167 } else {
8168 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8169 alu.op = ALU_OP1_MOV;
8170 alu.dst.sel = ctx->temp_reg;
8171 alu.dst.chan = 0;
8172 r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
8173 alu.last = 1;
8174 alu.dst.write = 1;
8175 r = r600_bytecode_add_alu(ctx->bc, &alu);
8176 if (r)
8177 return r;
8178 }
8179
8180 memset(&gds, 0, sizeof(struct r600_bytecode_gds));
8181 gds.op = gds_op;
8182 gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8183 gds.uav_id = uav_id;
8184 gds.uav_index_mode = uav_index_mode;
8185 gds.src_gpr = ctx->temp_reg;
8186 gds.src_gpr2 = ctx->temp_reg;
8187 gds.src_sel_x = 4;
8188 gds.src_sel_y = 0;
8189 gds.src_sel_z = 4;
8190 gds.dst_sel_x = 0;
8191 gds.dst_sel_y = 7;
8192 gds.dst_sel_z = 7;
8193 gds.dst_sel_w = 7;
8194 gds.alloc_consume = 1;
8195 r = r600_bytecode_add_gds(ctx->bc, &gds);
8196 if (r)
8197 return r;
8198 ctx->bc->cf_last->vpm = 1;
8199 return 0;
8200 }
8201
8202 static int tgsi_atomic_op(struct r600_shader_ctx *ctx)
8203 {
8204 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8205 if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
8206 return tgsi_atomic_op_rat(ctx);
8207 if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
8208 return tgsi_atomic_op_gds(ctx);
8209 return 0;
8210 }
8211
8212 static int tgsi_resq(struct r600_shader_ctx *ctx)
8213 {
8214 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8215 unsigned sampler_index_mode;
8216 struct r600_bytecode_tex tex;
8217 int r;
8218 boolean has_txq_cube_array_z = false;
8219
8220 if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
8221 ctx->shader->uses_tex_buffers = true;
8222 return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset);
8223 }
8224
8225 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY &&
8226 inst->Dst[0].Register.WriteMask & 4) {
8227 ctx->shader->has_txq_cube_array_z_comp = true;
8228 has_txq_cube_array_z = true;
8229 }
8230
8231 sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
8232 if (sampler_index_mode)
8233 egcm_load_index_reg(ctx->bc, 1, false);
8234
8235
8236 /* does this shader want the number of layers from TXQ for a cube array? */
8237 if (has_txq_cube_array_z) {
8238 int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset;
8239 struct r600_bytecode_alu alu;
8240
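/* TXQ doesn't report the layer count of a cube array here, so it is
 * read from the driver-maintained buffer-info constant buffer, where
 * each vec4 packs the info for two images. */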
8241 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8242 alu.op = ALU_OP1_MOV;
8243
8244 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
8245 /* channel 1 or 3 of each word */
8246 alu.src[0].sel += (id / 2);
8247 alu.src[0].chan = ((id % 2) * 2) + 1;
8248 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
8249 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
8250 alu.last = 1;
8251 r = r600_bytecode_add_alu(ctx->bc, &alu);
8252 if (r)
8253 return r;
8254 /* disable writemask from texture instruction */
8255 inst->Dst[0].Register.WriteMask &= ~4;
8256 }
8257 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
8258 tex.op = ctx->inst_info->op;
8259 tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index;
8260 tex.sampler_index_mode = sampler_index_mode;
8261 tex.resource_id = tex.sampler_id;
8262 tex.resource_index_mode = sampler_index_mode;
8263 tex.src_sel_x = 4;
8264 tex.src_sel_y = 4;
8265 tex.src_sel_z = 4;
8266 tex.src_sel_w = 4;
8267 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
8268 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
8269 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
8270 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
8271 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
8272 r = r600_bytecode_add_tex(ctx->bc, &tex);
8273 if (r)
8274 return r;
8275
8276 return 0;
8277 }
8278
8279 static int tgsi_lrp(struct r600_shader_ctx *ctx)
8280 {
8281 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8282 struct r600_bytecode_alu alu;
8283 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8284 unsigned i, temp_regs[2];
8285 int r;
8286
8287 /* optimize the equal-balance case LRP(0.5, a, b): one ADD with the divide-by-2 output modifier (omod = 3) computes (a + b) / 2 */
8288 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
8289 for (i = 0; i < lasti + 1; i++) {
8290 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8291 continue;
8292
8293 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8294 alu.op = ALU_OP2_ADD;
8295 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
8296 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8297 alu.omod = 3;
8298 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8299 alu.dst.chan = i;
8300 if (i == lasti) {
8301 alu.last = 1;
8302 }
8303 r = r600_bytecode_add_alu(ctx->bc, &alu);
8304 if (r)
8305 return r;
8306 }
8307 return 0;
8308 }
8309
8310 /* 1 - src0 */
8311 for (i = 0; i < lasti + 1; i++) {
8312 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8313 continue;
8314
8315 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8316 alu.op = ALU_OP2_ADD;
8317 alu.src[0].sel = V_SQ_ALU_SRC_1;
8318 alu.src[0].chan = 0;
8319 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
8320 r600_bytecode_src_toggle_neg(&alu.src[1]);
8321 alu.dst.sel = ctx->temp_reg;
8322 alu.dst.chan = i;
8323 if (i == lasti) {
8324 alu.last = 1;
8325 }
8326 alu.dst.write = 1;
8327 r = r600_bytecode_add_alu(ctx->bc, &alu);
8328 if (r)
8329 return r;
8330 }
8331
8332 /* (1 - src0) * src2 */
8333 for (i = 0; i < lasti + 1; i++) {
8334 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8335 continue;
8336
8337 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8338 alu.op = ALU_OP2_MUL;
8339 alu.src[0].sel = ctx->temp_reg;
8340 alu.src[0].chan = i;
8341 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8342 alu.dst.sel = ctx->temp_reg;
8343 alu.dst.chan = i;
8344 if (i == lasti) {
8345 alu.last = 1;
8346 }
8347 alu.dst.write = 1;
8348 r = r600_bytecode_add_alu(ctx->bc, &alu);
8349 if (r)
8350 return r;
8351 }
8352
8353 /* src0 * src1 + (1 - src0) * src2 */
8354 if (ctx->src[0].abs)
8355 temp_regs[0] = r600_get_temp(ctx);
8356 else
8357 temp_regs[0] = 0;
8358 if (ctx->src[1].abs)
8359 temp_regs[1] = r600_get_temp(ctx);
8360 else
8361 temp_regs[1] = 0;
8362
8363 for (i = 0; i < lasti + 1; i++) {
8364 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8365 continue;
8366
8367 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8368 alu.op = ALU_OP3_MULADD;
8369 alu.is_op3 = 1;
8370 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
8371 if (r)
8372 return r;
8373 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
8374 if (r)
8375 return r;
8376 alu.src[2].sel = ctx->temp_reg;
8377 alu.src[2].chan = i;
8378
8379 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8380 alu.dst.chan = i;
8381 if (i == lasti) {
8382 alu.last = 1;
8383 }
8384 r = r600_bytecode_add_alu(ctx->bc, &alu);
8385 if (r)
8386 return r;
8387 }
8388 return 0;
8389 }
8390
8391 static int tgsi_cmp(struct r600_shader_ctx *ctx)
8392 {
8393 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8394 struct r600_bytecode_alu alu;
8395 int i, r, j;
8396 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8397 int temp_regs[3];
8398 unsigned op;
8399
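/* TGSI CMP is CNDGE(src0, src2, src1). When src0 carries both abs and
 * neg the value is -|x|, which is >= 0 only for x == 0, so drop the
 * modifiers and use CNDE instead. */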
8400 if (ctx->src[0].abs && ctx->src[0].neg) {
8401 op = ALU_OP3_CNDE;
8402 ctx->src[0].abs = 0;
8403 ctx->src[0].neg = 0;
8404 } else {
8405 op = ALU_OP3_CNDGE;
8406 }
8407
8408 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
8409 temp_regs[j] = 0;
8410 if (ctx->src[j].abs)
8411 temp_regs[j] = r600_get_temp(ctx);
8412 }
8413
8414 for (i = 0; i < lasti + 1; i++) {
8415 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8416 continue;
8417
8418 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8419 alu.op = op;
8420 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
8421 if (r)
8422 return r;
8423 r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
8424 if (r)
8425 return r;
8426 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
8427 if (r)
8428 return r;
8429 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8430 alu.dst.chan = i;
8431 alu.dst.write = 1;
8432 alu.is_op3 = 1;
8433 if (i == lasti)
8434 alu.last = 1;
8435 r = r600_bytecode_add_alu(ctx->bc, &alu);
8436 if (r)
8437 return r;
8438 }
8439 return 0;
8440 }
8441
8442 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
8443 {
8444 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8445 struct r600_bytecode_alu alu;
8446 int i, r;
8447 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8448
8449 for (i = 0; i < lasti + 1; i++) {
8450 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8451 continue;
8452
8453 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8454 alu.op = ALU_OP3_CNDE_INT;
8455 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8456 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
8457 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
8458 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8459 alu.dst.chan = i;
8460 alu.dst.write = 1;
8461 alu.is_op3 = 1;
8462 if (i == lasti)
8463 alu.last = 1;
8464 r = r600_bytecode_add_alu(ctx->bc, &alu);
8465 if (r)
8466 return r;
8467 }
8468 return 0;
8469 }
8470
8471 static int tgsi_exp(struct r600_shader_ctx *ctx)
8472 {
8473 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8474 struct r600_bytecode_alu alu;
8475 int r;
8476 unsigned i;
8477
8478 /* result.x = 2^floor(src); */
8479 if (inst->Dst[0].Register.WriteMask & 1) {
8480 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8481
8482 alu.op = ALU_OP1_FLOOR;
8483 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8484
8485 alu.dst.sel = ctx->temp_reg;
8486 alu.dst.chan = 0;
8487 alu.dst.write = 1;
8488 alu.last = 1;
8489 r = r600_bytecode_add_alu(ctx->bc, &alu);
8490 if (r)
8491 return r;
8492
8493 if (ctx->bc->chip_class == CAYMAN) {
8494 for (i = 0; i < 3; i++) {
8495 alu.op = ALU_OP1_EXP_IEEE;
8496 alu.src[0].sel = ctx->temp_reg;
8497 alu.src[0].chan = 0;
8498
8499 alu.dst.sel = ctx->temp_reg;
8500 alu.dst.chan = i;
8501 alu.dst.write = i == 0;
8502 alu.last = i == 2;
8503 r = r600_bytecode_add_alu(ctx->bc, &alu);
8504 if (r)
8505 return r;
8506 }
8507 } else {
8508 alu.op = ALU_OP1_EXP_IEEE;
8509 alu.src[0].sel = ctx->temp_reg;
8510 alu.src[0].chan = 0;
8511
8512 alu.dst.sel = ctx->temp_reg;
8513 alu.dst.chan = 0;
8514 alu.dst.write = 1;
8515 alu.last = 1;
8516 r = r600_bytecode_add_alu(ctx->bc, &alu);
8517 if (r)
8518 return r;
8519 }
8520 }
8521
8522 /* result.y = fract(src.x) = src.x - floor(src.x); */
8523 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
8524 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8525
8526 alu.op = ALU_OP1_FRACT;
8527 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8528
8529 alu.dst.sel = ctx->temp_reg;
8530 #if 0
8531 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8532 if (r)
8533 return r;
8534 #endif
8535 alu.dst.write = 1;
8536 alu.dst.chan = 1;
8537
8538 alu.last = 1;
8539
8540 r = r600_bytecode_add_alu(ctx->bc, &alu);
8541 if (r)
8542 return r;
8543 }
8544
8545 /* result.z = RoughApprox2ToX(tmp);*/
8546 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
8547 if (ctx->bc->chip_class == CAYMAN) {
8548 for (i = 0; i < 3; i++) {
8549 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8550 alu.op = ALU_OP1_EXP_IEEE;
8551 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8552
8553 alu.dst.sel = ctx->temp_reg;
8554 alu.dst.chan = i;
8555 if (i == 2) {
8556 alu.dst.write = 1;
8557 alu.last = 1;
8558 }
8559
8560 r = r600_bytecode_add_alu(ctx->bc, &alu);
8561 if (r)
8562 return r;
8563 }
8564 } else {
8565 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8566 alu.op = ALU_OP1_EXP_IEEE;
8567 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8568
8569 alu.dst.sel = ctx->temp_reg;
8570 alu.dst.write = 1;
8571 alu.dst.chan = 2;
8572
8573 alu.last = 1;
8574
8575 r = r600_bytecode_add_alu(ctx->bc, &alu);
8576 if (r)
8577 return r;
8578 }
8579 }
8580
8581 /* result.w = 1.0;*/
8582 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
8583 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8584
8585 alu.op = ALU_OP1_MOV;
8586 alu.src[0].sel = V_SQ_ALU_SRC_1;
8587 alu.src[0].chan = 0;
8588
8589 alu.dst.sel = ctx->temp_reg;
8590 alu.dst.chan = 3;
8591 alu.dst.write = 1;
8592 alu.last = 1;
8593 r = r600_bytecode_add_alu(ctx->bc, &alu);
8594 if (r)
8595 return r;
8596 }
8597 return tgsi_helper_copy(ctx, inst);
8598 }
8599
8600 static int tgsi_log(struct r600_shader_ctx *ctx)
8601 {
8602 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8603 struct r600_bytecode_alu alu;
8604 int r;
8605 unsigned i;
8606
8607 /* result.x = floor(log2(|src|)); */
8608 if (inst->Dst[0].Register.WriteMask & 1) {
8609 if (ctx->bc->chip_class == CAYMAN) {
8610 for (i = 0; i < 3; i++) {
8611 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8612
8613 alu.op = ALU_OP1_LOG_IEEE;
8614 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8615 r600_bytecode_src_set_abs(&alu.src[0]);
8616
8617 alu.dst.sel = ctx->temp_reg;
8618 alu.dst.chan = i;
8619 if (i == 0)
8620 alu.dst.write = 1;
8621 if (i == 2)
8622 alu.last = 1;
8623 r = r600_bytecode_add_alu(ctx->bc, &alu);
8624 if (r)
8625 return r;
8626 }
8627
8628 } else {
8629 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8630
8631 alu.op = ALU_OP1_LOG_IEEE;
8632 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8633 r600_bytecode_src_set_abs(&alu.src[0]);
8634
8635 alu.dst.sel = ctx->temp_reg;
8636 alu.dst.chan = 0;
8637 alu.dst.write = 1;
8638 alu.last = 1;
8639 r = r600_bytecode_add_alu(ctx->bc, &alu);
8640 if (r)
8641 return r;
8642 }
8643
8644 alu.op = ALU_OP1_FLOOR;
8645 alu.src[0].sel = ctx->temp_reg;
8646 alu.src[0].chan = 0;
8647
8648 alu.dst.sel = ctx->temp_reg;
8649 alu.dst.chan = 0;
8650 alu.dst.write = 1;
8651 alu.last = 1;
8652
8653 r = r600_bytecode_add_alu(ctx->bc, &alu);
8654 if (r)
8655 return r;
8656 }
8657
8658 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
8659 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
8660
8661 if (ctx->bc->chip_class == CAYMAN) {
8662 for (i = 0; i < 3; i++) {
8663 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8664
8665 alu.op = ALU_OP1_LOG_IEEE;
8666 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8667 r600_bytecode_src_set_abs(&alu.src[0]);
8668
8669 alu.dst.sel = ctx->temp_reg;
8670 alu.dst.chan = i;
8671 if (i == 1)
8672 alu.dst.write = 1;
8673 if (i == 2)
8674 alu.last = 1;
8675
8676 r = r600_bytecode_add_alu(ctx->bc, &alu);
8677 if (r)
8678 return r;
8679 }
8680 } else {
8681 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8682
8683 alu.op = ALU_OP1_LOG_IEEE;
8684 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8685 r600_bytecode_src_set_abs(&alu.src[0]);
8686
8687 alu.dst.sel = ctx->temp_reg;
8688 alu.dst.chan = 1;
8689 alu.dst.write = 1;
8690 alu.last = 1;
8691
8692 r = r600_bytecode_add_alu(ctx->bc, &alu);
8693 if (r)
8694 return r;
8695 }
8696
8697 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8698
8699 alu.op = ALU_OP1_FLOOR;
8700 alu.src[0].sel = ctx->temp_reg;
8701 alu.src[0].chan = 1;
8702
8703 alu.dst.sel = ctx->temp_reg;
8704 alu.dst.chan = 1;
8705 alu.dst.write = 1;
8706 alu.last = 1;
8707
8708 r = r600_bytecode_add_alu(ctx->bc, &alu);
8709 if (r)
8710 return r;
8711
8712 if (ctx->bc->chip_class == CAYMAN) {
8713 for (i = 0; i < 3; i++) {
8714 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8715 alu.op = ALU_OP1_EXP_IEEE;
8716 alu.src[0].sel = ctx->temp_reg;
8717 alu.src[0].chan = 1;
8718
8719 alu.dst.sel = ctx->temp_reg;
8720 alu.dst.chan = i;
8721 if (i == 1)
8722 alu.dst.write = 1;
8723 if (i == 2)
8724 alu.last = 1;
8725
8726 r = r600_bytecode_add_alu(ctx->bc, &alu);
8727 if (r)
8728 return r;
8729 }
8730 } else {
8731 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8732 alu.op = ALU_OP1_EXP_IEEE;
8733 alu.src[0].sel = ctx->temp_reg;
8734 alu.src[0].chan = 1;
8735
8736 alu.dst.sel = ctx->temp_reg;
8737 alu.dst.chan = 1;
8738 alu.dst.write = 1;
8739 alu.last = 1;
8740
8741 r = r600_bytecode_add_alu(ctx->bc, &alu);
8742 if (r)
8743 return r;
8744 }
8745
8746 if (ctx->bc->chip_class == CAYMAN) {
8747 for (i = 0; i < 3; i++) {
8748 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8749 alu.op = ALU_OP1_RECIP_IEEE;
8750 alu.src[0].sel = ctx->temp_reg;
8751 alu.src[0].chan = 1;
8752
8753 alu.dst.sel = ctx->temp_reg;
8754 alu.dst.chan = i;
8755 if (i == 1)
8756 alu.dst.write = 1;
8757 if (i == 2)
8758 alu.last = 1;
8759
8760 r = r600_bytecode_add_alu(ctx->bc, &alu);
8761 if (r)
8762 return r;
8763 }
8764 } else {
8765 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8766 alu.op = ALU_OP1_RECIP_IEEE;
8767 alu.src[0].sel = ctx->temp_reg;
8768 alu.src[0].chan = 1;
8769
8770 alu.dst.sel = ctx->temp_reg;
8771 alu.dst.chan = 1;
8772 alu.dst.write = 1;
8773 alu.last = 1;
8774
8775 r = r600_bytecode_add_alu(ctx->bc, &alu);
8776 if (r)
8777 return r;
8778 }
8779
8780 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8781
8782 alu.op = ALU_OP2_MUL;
8783
8784 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8785 r600_bytecode_src_set_abs(&alu.src[0]);
8786
8787 alu.src[1].sel = ctx->temp_reg;
8788 alu.src[1].chan = 1;
8789
8790 alu.dst.sel = ctx->temp_reg;
8791 alu.dst.chan = 1;
8792 alu.dst.write = 1;
8793 alu.last = 1;
8794
8795 r = r600_bytecode_add_alu(ctx->bc, &alu);
8796 if (r)
8797 return r;
8798 }
8799
8800 /* result.z = log2(|src|);*/
8801 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
8802 if (ctx->bc->chip_class == CAYMAN) {
8803 for (i = 0; i < 3; i++) {
8804 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8805
8806 alu.op = ALU_OP1_LOG_IEEE;
8807 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8808 r600_bytecode_src_set_abs(&alu.src[0]);
8809
8810 alu.dst.sel = ctx->temp_reg;
8811 if (i == 2)
8812 alu.dst.write = 1;
8813 alu.dst.chan = i;
8814 if (i == 2)
8815 alu.last = 1;
8816
8817 r = r600_bytecode_add_alu(ctx->bc, &alu);
8818 if (r)
8819 return r;
8820 }
8821 } else {
8822 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8823
8824 alu.op = ALU_OP1_LOG_IEEE;
8825 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
8826 r600_bytecode_src_set_abs(&alu.src[0]);
8827
8828 alu.dst.sel = ctx->temp_reg;
8829 alu.dst.write = 1;
8830 alu.dst.chan = 2;
8831 alu.last = 1;
8832
8833 r = r600_bytecode_add_alu(ctx->bc, &alu);
8834 if (r)
8835 return r;
8836 }
8837 }
8838
8839 /* result.w = 1.0; */
8840 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
8841 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8842
8843 alu.op = ALU_OP1_MOV;
8844 alu.src[0].sel = V_SQ_ALU_SRC_1;
8845 alu.src[0].chan = 0;
8846
8847 alu.dst.sel = ctx->temp_reg;
8848 alu.dst.chan = 3;
8849 alu.dst.write = 1;
8850 alu.last = 1;
8851
8852 r = r600_bytecode_add_alu(ctx->bc, &alu);
8853 if (r)
8854 return r;
8855 }
8856
8857 return tgsi_helper_copy(ctx, inst);
8858 }
8859
8860 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
8861 {
8862 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8863 struct r600_bytecode_alu alu;
8864 int r;
8865 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8866 unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
8867
8868 assert(inst->Dst[0].Register.Index < 3);
8869 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8870
8871 switch (inst->Instruction.Opcode) {
8872 case TGSI_OPCODE_ARL:
8873 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
8874 break;
8875 case TGSI_OPCODE_ARR:
8876 alu.op = ALU_OP1_FLT_TO_INT;
8877 break;
8878 case TGSI_OPCODE_UARL:
8879 alu.op = ALU_OP1_MOV;
8880 break;
8881 default:
8882 assert(0);
8883 return -1;
8884 }
8885
8886 for (i = 0; i <= lasti; ++i) {
8887 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
8888 continue;
8889 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8890 alu.last = i == lasti;
8891 alu.dst.sel = reg;
8892 alu.dst.chan = i;
8893 alu.dst.write = 1;
8894 r = r600_bytecode_add_alu(ctx->bc, &alu);
8895 if (r)
8896 return r;
8897 }
8898
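/* the address/index register contents changed; mark the cached value
 * stale so the next use reloads it */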
8899 if (inst->Dst[0].Register.Index > 0)
8900 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
8901 else
8902 ctx->bc->ar_loaded = 0;
8903
8904 return 0;
8905 }
8906 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
8907 {
8908 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8909 struct r600_bytecode_alu alu;
8910 int r;
8911 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
8912
8913 switch (inst->Instruction.Opcode) {
8914 case TGSI_OPCODE_ARL:
8915 memset(&alu, 0, sizeof(alu));
8916 alu.op = ALU_OP1_FLOOR;
8917 alu.dst.sel = ctx->bc->ar_reg;
8918 alu.dst.write = 1;
8919 for (i = 0; i <= lasti; ++i) {
8920 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
8921 alu.dst.chan = i;
8922 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8923 alu.last = i == lasti;
8924 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
8925 return r;
8926 }
8927 }
8928
8929 memset(&alu, 0, sizeof(alu));
8930 alu.op = ALU_OP1_FLT_TO_INT;
8931 alu.src[0].sel = ctx->bc->ar_reg;
8932 alu.dst.sel = ctx->bc->ar_reg;
8933 alu.dst.write = 1;
8934 /* FLT_TO_INT is trans-only on r600/r700 */
8935 alu.last = TRUE;
8936 for (i = 0; i <= lasti; ++i) {
8937 alu.dst.chan = i;
8938 alu.src[0].chan = i;
8939 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
8940 return r;
8941 }
8942 break;
8943 case TGSI_OPCODE_ARR:
8944 memset(&alu, 0, sizeof(alu));
8945 alu.op = ALU_OP1_FLT_TO_INT;
8946 alu.dst.sel = ctx->bc->ar_reg;
8947 alu.dst.write = 1;
8948 /* FLT_TO_INT is trans-only on r600/r700 */
8949 alu.last = TRUE;
8950 for (i = 0; i <= lasti; ++i) {
8951 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
8952 alu.dst.chan = i;
8953 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8954 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
8955 return r;
8956 }
8957 }
8958 break;
8959 case TGSI_OPCODE_UARL:
8960 memset(&alu, 0, sizeof(alu));
8961 alu.op = ALU_OP1_MOV;
8962 alu.dst.sel = ctx->bc->ar_reg;
8963 alu.dst.write = 1;
8964 for (i = 0; i <= lasti; ++i) {
8965 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
8966 alu.dst.chan = i;
8967 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8968 alu.last = i == lasti;
8969 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
8970 return r;
8971 }
8972 }
8973 break;
8974 default:
8975 assert(0);
8976 return -1;
8977 }
8978
8979 ctx->bc->ar_loaded = 0;
8980 return 0;
8981 }
8982
8983 static int tgsi_opdst(struct r600_shader_ctx *ctx)
8984 {
8985 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
8986 struct r600_bytecode_alu alu;
8987 int i, r = 0;
8988
8989 for (i = 0; i < 4; i++) {
8990 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
8991
8992 alu.op = ALU_OP2_MUL;
8993 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
8994
8995 if (i == 0 || i == 3) {
8996 alu.src[0].sel = V_SQ_ALU_SRC_1;
8997 } else {
8998 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
8999 }
9000
9001 if (i == 0 || i == 2) {
9002 alu.src[1].sel = V_SQ_ALU_SRC_1;
9003 } else {
9004 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
9005 }
9006 if (i == 3)
9007 alu.last = 1;
9008 r = r600_bytecode_add_alu(ctx->bc, &alu);
9009 if (r)
9010 return r;
9011 }
9012 return 0;
9013 }
9014
9015 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
9016 {
9017 struct r600_bytecode_alu alu;
9018 int r;
9019
9020 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9021 alu.op = opcode;
9022 alu.execute_mask = 1;
9023 alu.update_pred = 1;
9024
9025 alu.dst.sel = ctx->temp_reg;
9026 alu.dst.write = 1;
9027 alu.dst.chan = 0;
9028
9029 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9030 alu.src[1].sel = V_SQ_ALU_SRC_0;
9031 alu.src[1].chan = 0;
9032
9033 alu.last = 1;
9034
9035 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
9036 if (r)
9037 return r;
9038 return 0;
9039 }
9040
9041 static int pops(struct r600_shader_ctx *ctx, int pops)
9042 {
9043 unsigned force_pop = ctx->bc->force_add_cf;
9044
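/* If the trailing CF instruction is an ALU clause, up to two pops can
 * ride on it by rewriting it to ALU_POP_AFTER/ALU_POP2_AFTER, saving
 * an explicit POP; anything more falls back to a real POP below. */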
9045 if (!force_pop) {
9046 int alu_pop = 3;
9047 if (ctx->bc->cf_last) {
9048 if (ctx->bc->cf_last->op == CF_OP_ALU)
9049 alu_pop = 0;
9050 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
9051 alu_pop = 1;
9052 }
9053 alu_pop += pops;
9054 if (alu_pop == 1) {
9055 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
9056 ctx->bc->force_add_cf = 1;
9057 } else if (alu_pop == 2) {
9058 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
9059 ctx->bc->force_add_cf = 1;
9060 } else {
9061 force_pop = 1;
9062 }
9063 }
9064
9065 if (force_pop) {
9066 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
9067 ctx->bc->cf_last->pop_count = pops;
9068 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
9069 }
9070
9071 return 0;
9072 }
9073
9074 static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
9075 unsigned reason)
9076 {
9077 struct r600_stack_info *stack = &ctx->bc->stack;
9078 unsigned elements, entries;
9079
9080 unsigned entry_size = stack->entry_size;
9081
9082 elements = (stack->loop + stack->push_wqm) * entry_size;
9083 elements += stack->push;
9084
9085 switch (ctx->bc->chip_class) {
9086 case R600:
9087 case R700:
9088 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
9089 * the stack must be reserved to hold the current active/continue
9090 * masks */
9091 if (reason == FC_PUSH_VPM) {
9092 elements += 2;
9093 }
9094 break;
9095
9096 case CAYMAN:
9097 /* r9xx: any stack operation on empty stack consumes 2 additional
9098 * elements */
9099 elements += 2;
9100
9101 /* fallthrough */
9102 /* FIXME: do the two elements added above cover the cases for the
9103 * r8xx+ below? */
9104
9105 case EVERGREEN:
9106 /* r8xx+: 2 extra elements are not always required, but one extra
9107 * element must be added for each of the following cases:
9108 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
9109 * stack usage.
9110 * (Currently we don't use ALU_ELSE_AFTER.)
9111 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
9112 * PUSH instruction is executed.
9113 *
9114 * NOTE: it seems we also need to reserve an additional element in some
9115 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
9116 * then STACK_SIZE should be 2 instead of 1 */
9117 if (reason == FC_PUSH_VPM) {
9118 elements += 1;
9119 }
9120 break;
9121
9122 default:
9123 assert(0);
9124 break;
9125 }
9126
9127 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
9128 * for all chips, so we use 4 in the final formula, not the real entry_size
9129 * for the chip */
9130 entry_size = 4;
9131
9132 entries = (elements + (entry_size - 1)) / entry_size;
9133
9134 if (entries > stack->max_entries)
9135 stack->max_entries = entries;
9136 }
9137
9138 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
9139 {
9140 switch(reason) {
9141 case FC_PUSH_VPM:
9142 --ctx->bc->stack.push;
9143 assert(ctx->bc->stack.push >= 0);
9144 break;
9145 case FC_PUSH_WQM:
9146 --ctx->bc->stack.push_wqm;
9147 assert(ctx->bc->stack.push_wqm >= 0);
9148 break;
9149 case FC_LOOP:
9150 --ctx->bc->stack.loop;
9151 assert(ctx->bc->stack.loop >= 0);
9152 break;
9153 default:
9154 assert(0);
9155 break;
9156 }
9157 }
9158
9159 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
9160 {
9161 switch (reason) {
9162 case FC_PUSH_VPM:
9163 ++ctx->bc->stack.push;
9164 break;
9165 case FC_PUSH_WQM:
9166 ++ctx->bc->stack.push_wqm;
break;
9167 case FC_LOOP:
9168 ++ctx->bc->stack.loop;
9169 break;
9170 default:
9171 assert(0);
9172 }
9173
9174 callstack_update_max_depth(ctx, reason);
9175 }
9176
9177 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
9178 {
9179 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
9180
9181 sp->mid = realloc((void *)sp->mid,
9182 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
9183 sp->mid[sp->num_mid] = ctx->bc->cf_last;
9184 sp->num_mid++;
9185 }
9186
9187 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
9188 {
9189 assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
9190 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
9191 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
9192 ctx->bc->fc_sp++;
9193 }
9194
9195 static void fc_poplevel(struct r600_shader_ctx *ctx)
9196 {
9197 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
9198 free(sp->mid);
9199 sp->mid = NULL;
9200 sp->num_mid = 0;
9201 sp->start = NULL;
9202 sp->type = 0;
9203 ctx->bc->fc_sp--;
9204 }
9205
9206 #if 0
9207 static int emit_return(struct r600_shader_ctx *ctx)
9208 {
9209 r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
9210 return 0;
9211 }
9212
9213 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
9214 {
9215
9216 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
9217 ctx->bc->cf_last->pop_count = pops;
9218 /* XXX work out offset */
9219 return 0;
9220 }
9221
9222 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
9223 {
9224 return 0;
9225 }
9226
9227 static void emit_testflag(struct r600_shader_ctx *ctx)
9228 {
9229
9230 }
9231
9232 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
9233 {
9234 emit_testflag(ctx);
9235 emit_jump_to_offset(ctx, 1, 4);
9236 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
9237 pops(ctx, ifidx + 1);
9238 emit_return(ctx);
9239 }
9240
9241 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
9242 {
9243 emit_testflag(ctx);
9244
9245 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9246 ctx->bc->cf_last->pop_count = 1;
9247
9248 fc_set_mid(ctx, fc_sp);
9249
9250 pops(ctx, 1);
9251 }
9252 #endif
9253
9254 static int emit_if(struct r600_shader_ctx *ctx, int opcode)
9255 {
9256 int alu_type = CF_OP_ALU_PUSH_BEFORE;
9257
9258 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
9259 * LOOP_STARTxxx for nested loops may put the branch stack into a state
9260 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
9261 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
9262 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
9263 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
9264 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
9265 alu_type = CF_OP_ALU;
9266 }
9267
9268 emit_logic_pred(ctx, opcode, alu_type);
9269
9270 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
9271
9272 fc_pushlevel(ctx, FC_IF);
9273
9274 callstack_push(ctx, FC_PUSH_VPM);
9275 return 0;
9276 }
9277
9278 static int tgsi_if(struct r600_shader_ctx *ctx)
9279 {
9280 return emit_if(ctx, ALU_OP2_PRED_SETNE);
9281 }
9282
9283 static int tgsi_uif(struct r600_shader_ctx *ctx)
9284 {
9285 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
9286 }
9287
9288 static int tgsi_else(struct r600_shader_ctx *ctx)
9289 {
9290 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
9291 ctx->bc->cf_last->pop_count = 1;
9292
9293 fc_set_mid(ctx, ctx->bc->fc_sp - 1);
9294 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
9295 return 0;
9296 }
9297
9298 static int tgsi_endif(struct r600_shader_ctx *ctx)
9299 {
9300 pops(ctx, 1);
9301 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
9302 R600_ERR("if/endif unbalanced in shader\n");
9303 return -1;
9304 }
9305
9306 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
9307 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
9308 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
9309 } else {
9310 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
9311 }
9312 fc_poplevel(ctx);
9313
9314 callstack_pop(ctx, FC_PUSH_VPM);
9315 return 0;
9316 }
9317
9318 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
9319 {
9320 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so unlike the
9321 * other LOOP_* instructions it is not limited to 4096 iterations. */
9322 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
9323
9324 fc_pushlevel(ctx, FC_LOOP);
9325
9326 /* check stack depth */
9327 callstack_push(ctx, FC_LOOP);
9328 return 0;
9329 }
9330
9331 static int tgsi_endloop(struct r600_shader_ctx *ctx)
9332 {
9333 unsigned i;
9334
9335 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
9336
9337 if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
9338 R600_ERR("loop/endloop in shader code are not paired.\n");
9339 return -EINVAL;
9340 }
9341
9342 /* fixup loop pointers - from r600isa
9343 LOOP END points to CF after LOOP START,
9344 LOOP START point to CF after LOOP END
9345 BRK/CONT point to LOOP END CF
9346 */
9347 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
9348
9349 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
9350
9351 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
9352 ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
9353 }
9354 /* XXX add LOOPRET support */
9355 fc_poplevel(ctx);
9356 callstack_pop(ctx, FC_LOOP);
9357 return 0;
9358 }
9359
9360 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
9361 {
9362 unsigned int fscp;
9363
9364 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
9365 {
9366 if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
9367 break;
9368 }
9369
9370 if (fscp == 0) {
9371 R600_ERR("Break not inside loop/endloop pair\n");
9372 return -EINVAL;
9373 }
9374
9375 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9376
9377 fc_set_mid(ctx, fscp - 1);
9378
9379 return 0;
9380 }
9381
9382 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
9383 {
9384 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9385 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
9386 int r;
9387
9388 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
9389 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
9390
9391 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
9392 if (!r) {
9393 ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
9394 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
9395 return emit_inc_ring_offset(ctx, stream, TRUE);
9396 }
9397 return r;
9398 }
9399
9400 static int tgsi_umad(struct r600_shader_ctx *ctx)
9401 {
9402 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9403 struct r600_bytecode_alu alu;
9404 int i, j, k, r;
9405 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9406
9407 /* src0 * src1 */
9408 for (i = 0; i < lasti + 1; i++) {
9409 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9410 continue;
9411
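/* on Cayman MULLO_UINT is a four-slot vector op: issue it in every
 * slot and keep only the lane matching channel i via dst.write */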
9412 if (ctx->bc->chip_class == CAYMAN) {
9413 for (j = 0 ; j < 4; j++) {
9414 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9415
9416 alu.op = ALU_OP2_MULLO_UINT;
9417 for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
9418 r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
9419 }
9420 alu.dst.chan = j;
9421 alu.dst.sel = ctx->temp_reg;
9422 alu.dst.write = (j == i);
9423 if (j == 3)
9424 alu.last = 1;
9425 r = r600_bytecode_add_alu(ctx->bc, &alu);
9426 if (r)
9427 return r;
9428 }
9429 } else {
9430 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9431
9432 alu.dst.chan = i;
9433 alu.dst.sel = ctx->temp_reg;
9434 alu.dst.write = 1;
9435
9436 alu.op = ALU_OP2_MULLO_UINT;
9437 for (j = 0; j < 2; j++) {
9438 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
9439 }
9440
9441 alu.last = 1;
9442 r = r600_bytecode_add_alu(ctx->bc, &alu);
9443 if (r)
9444 return r;
9445 }
9446 }
9447
9448
9449 for (i = 0; i < lasti + 1; i++) {
9450 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9451 continue;
9452
9453 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9454 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9455
9456 alu.op = ALU_OP2_ADD_INT;
9457
9458 alu.src[0].sel = ctx->temp_reg;
9459 alu.src[0].chan = i;
9460
9461 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
9462 if (i == lasti) {
9463 alu.last = 1;
9464 }
9465 r = r600_bytecode_add_alu(ctx->bc, &alu);
9466 if (r)
9467 return r;
9468 }
9469 return 0;
9470 }
9471
9472 static int tgsi_pk2h(struct r600_shader_ctx *ctx)
9473 {
9474 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9475 struct r600_bytecode_alu alu;
9476 int r, i;
9477 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9478
9479 /* temp.xy = f32_to_f16(src) */
9480 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9481 alu.op = ALU_OP1_FLT32_TO_FLT16;
9482 alu.dst.chan = 0;
9483 alu.dst.sel = ctx->temp_reg;
9484 alu.dst.write = 1;
9485 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9486 r = r600_bytecode_add_alu(ctx->bc, &alu);
9487 if (r)
9488 return r;
9489 alu.dst.chan = 1;
9490 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
9491 alu.last = 1;
9492 r = r600_bytecode_add_alu(ctx->bc, &alu);
9493 if (r)
9494 return r;
9495
9496 /* dst.x = temp.y * 0x10000 + temp.x */
9497 for (i = 0; i < lasti + 1; i++) {
9498 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9499 continue;
9500
9501 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9502 alu.op = ALU_OP3_MULADD_UINT24;
9503 alu.is_op3 = 1;
9504 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9505 alu.last = i == lasti;
9506 alu.src[0].sel = ctx->temp_reg;
9507 alu.src[0].chan = 1;
9508 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
9509 alu.src[1].value = 0x10000;
9510 alu.src[2].sel = ctx->temp_reg;
9511 alu.src[2].chan = 0;
9512 r = r600_bytecode_add_alu(ctx->bc, &alu);
9513 if (r)
9514 return r;
9515 }
9516
9517 return 0;
9518 }
9519
9520 static int tgsi_up2h(struct r600_shader_ctx *ctx)
9521 {
9522 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
9523 struct r600_bytecode_alu alu;
9524 int r, i;
9525 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
9526
9527 /* temp.x = src.x */
9528 /* note: no need to mask out the high bits */
9529 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9530 alu.op = ALU_OP1_MOV;
9531 alu.dst.chan = 0;
9532 alu.dst.sel = ctx->temp_reg;
9533 alu.dst.write = 1;
9534 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9535 r = r600_bytecode_add_alu(ctx->bc, &alu);
9536 if (r)
9537 return r;
9538
9539 /* temp.y = src.x >> 16 */
9540 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9541 alu.op = ALU_OP2_LSHR_INT;
9542 alu.dst.chan = 1;
9543 alu.dst.sel = ctx->temp_reg;
9544 alu.dst.write = 1;
9545 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
9546 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
9547 alu.src[1].value = 16;
9548 alu.last = 1;
9549 r = r600_bytecode_add_alu(ctx->bc, &alu);
9550 if (r)
9551 return r;
9552
9553 /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
9554 for (i = 0; i < lasti + 1; i++) {
9555 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
9556 continue;
9557 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
9558 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
9559 alu.op = ALU_OP1_FLT16_TO_FLT32;
9560 alu.src[0].sel = ctx->temp_reg;
9561 alu.src[0].chan = i % 2;
9562 alu.last = i == lasti;
9563 r = r600_bytecode_add_alu(ctx->bc, &alu);
9564 if (r)
9565 return r;
9566 }
9567
9568 return 0;
9569 }
9570
static int tgsi_bfe(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
	int r, i;

	r = tgsi_op3(ctx);
	if (r)
		return r;

	for (i = 0; i < lasti + 1; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP2_SETGE_INT;
		r600_bytecode_src(&alu.src[0], &ctx->src[2], i);
		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = 32;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		if (i == lasti)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	for (i = 0; i < lasti + 1; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP3_CNDE_INT;
		alu.is_op3 = 1;
		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;

		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.src[1].sel = alu.dst.sel;
		alu.src[1].chan = i;
		r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
		alu.dst.write = 1;
		if (i == lasti)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}

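/* Per-GPU-family dispatch tables, indexed by TGSI opcode. Entries with a
 * bare numeric index correspond to opcode values that currently have no
 * TGSI name.
 */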
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},

	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},

	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	/* MIN_DX10 returns the non-NaN operand if one source is NaN; MIN returns NaN */
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[33] = { ALU_OP0_NOP, tgsi_unsupported},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
	[81] = { ALU_OP0_NOP, tgsi_unsupported},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	[TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

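/* Evergreen variant: relative to the r600 table it adds native FMA,
 * half-float pack/unpack, fine derivatives, doubles, buffer/image
 * load/store and the RAT atomics.
 */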
static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[33] = { ALU_OP0_NOP, tgsi_unsupported},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[83] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};

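/* Cayman variant: transcendental, trig and 32-bit multiply opcodes dispatch
 * to the cayman_* emit helpers instead of the single-slot variants used in
 * the tables above.
 */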
static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
	[21] = { ALU_OP0_NOP, tgsi_unsupported},
	[22] = { ALU_OP0_NOP, tgsi_unsupported},
	[23] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
	[25] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
	[31] = { ALU_OP0_NOP, tgsi_unsupported},
	[32] = { ALU_OP0_NOP, tgsi_unsupported},
	[33] = { ALU_OP0_NOP, tgsi_unsupported},
	[34] = { ALU_OP0_NOP, tgsi_unsupported},
	[35] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
	[TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_pk2h},
	[TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[44] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
	[46] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
	[TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
	[51] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_up2h},
	[TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
	[59] = { ALU_OP0_NOP, tgsi_unsupported},
	[60] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
	[62] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
	[67] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
	[76] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[82] = { ALU_OP0_NOP, tgsi_unsupported},
	[83] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
	[88] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
	[93] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
	[106] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[113] = { ALU_OP0_NOP, tgsi_unsupported},
	[114] = { ALU_OP0_NOP, tgsi_unsupported},
	[115] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
	/* Refer below for TGSI_OPCODE_DFMA */
	[TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
	[TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
	[TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
	[TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
	[163] = { ALU_OP0_NOP, tgsi_unsupported},
	[164] = { ALU_OP0_NOP, tgsi_unsupported},
	[165] = { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFMA] = { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};