r600g: use tables with ISA info v3
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_sq.h"
24 #include "r600_llvm.h"
25 #include "r600_formats.h"
26 #include "r600_opcodes.h"
27 #include "r600_shader.h"
28 #include "r600d.h"
29
30 #include "pipe/p_shader_tokens.h"
31 #include "tgsi/tgsi_info.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_scan.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "util/u_memory.h"
36 #include <stdio.h>
37 #include <errno.h>
38 #include <byteswap.h>
39
40 /* CAYMAN notes
41 Why CAYMAN got loops for lots of instructions is explained here.
42
43 -These 8xx t-slot only ops are implemented in all vector slots.
44 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
45 These 8xx t-slot only opcodes become vector ops, with all four
46 slots expecting the arguments on sources a and b. Result is
47 broadcast to all channels.
48 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT
49 These 8xx t-slot only opcodes become vector ops in the z, y, and
50 x slots.
51 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
52 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
53 SQRT_IEEE/_64
54 SIN/COS
55 The w slot may have an independent co-issued operation, or if the
56 result is required to be in the w slot, the opcode above may be
57 issued in the w slot as well.
58 The compiler must issue the source argument to slots z, y, and x
59 */
60
61 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader)
62 {
63 struct r600_context *rctx = (struct r600_context *)ctx;
64 struct r600_shader *rshader = &shader->shader;
65 uint32_t *ptr;
66 int i;
67
68 /* copy new shader */
69 if (shader->bo == NULL) {
70 shader->bo = (struct r600_resource*)
71 pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, rshader->bc.ndw * 4);
72 if (shader->bo == NULL) {
73 return -ENOMEM;
74 }
75 ptr = r600_buffer_mmap_sync_with_rings(rctx, shader->bo, PIPE_TRANSFER_WRITE);
76 if (R600_BIG_ENDIAN) {
77 for (i = 0; i < rshader->bc.ndw; ++i) {
78 ptr[i] = bswap_32(rshader->bc.bytecode[i]);
79 }
80 } else {
81 memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr));
82 }
83 rctx->ws->buffer_unmap(shader->bo->cs_buf);
84 }
85 /* build state */
86 switch (rshader->processor_type) {
87 case TGSI_PROCESSOR_VERTEX:
88 if (rctx->chip_class >= EVERGREEN) {
89 evergreen_pipe_shader_vs(ctx, shader);
90 } else {
91 r600_pipe_shader_vs(ctx, shader);
92 }
93 break;
94 case TGSI_PROCESSOR_FRAGMENT:
95 if (rctx->chip_class >= EVERGREEN) {
96 evergreen_pipe_shader_ps(ctx, shader);
97 } else {
98 r600_pipe_shader_ps(ctx, shader);
99 }
100 break;
101 default:
102 return -EINVAL;
103 }
104 return 0;
105 }
106
107 static int r600_shader_from_tgsi(struct r600_screen *rscreen,
108 struct r600_pipe_shader *pipeshader,
109 struct r600_shader_key key);
110
111 static void r600_dump_streamout(struct pipe_stream_output_info *so)
112 {
113 unsigned i;
114
115 fprintf(stderr, "STREAMOUT\n");
116 for (i = 0; i < so->num_outputs; i++) {
117 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
118 so->output[i].start_component;
119 fprintf(stderr, " %i: MEM_STREAM0_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
120 i, so->output[i].output_buffer,
121 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
122 so->output[i].register_index,
123 mask & 1 ? "x" : "",
124 mask & 2 ? "y" : "",
125 mask & 4 ? "z" : "",
126 mask & 8 ? "w" : "",
127 so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
128 }
129 }
130
131 int r600_pipe_shader_create(struct pipe_context *ctx,
132 struct r600_pipe_shader *shader,
133 struct r600_shader_key key)
134 {
135 static int dump_shaders = -1;
136 struct r600_context *rctx = (struct r600_context *)ctx;
137 struct r600_pipe_shader_selector *sel = shader->selector;
138 int r;
139
140 shader->shader.bc.isa = rctx->isa;
141
142 /* Would like some magic "get_bool_option_once" routine.
143 */
144 if (dump_shaders == -1)
145 dump_shaders = debug_get_bool_option("R600_DUMP_SHADERS", FALSE);
146
147 if (dump_shaders) {
148 fprintf(stderr, "--------------------------------------------------------------\n");
149 tgsi_dump(sel->tokens, 0);
150
151 if (sel->so.num_outputs) {
152 r600_dump_streamout(&sel->so);
153 }
154 }
155 r = r600_shader_from_tgsi(rctx->screen, shader, key);
156 if (r) {
157 R600_ERR("translation from TGSI failed !\n");
158 return r;
159 }
160 r = r600_bytecode_build(&shader->shader.bc);
161 if (r) {
162 R600_ERR("building bytecode failed !\n");
163 return r;
164 }
165 if (dump_shaders) {
166 r600_bytecode_dump(&shader->shader.bc);
167 fprintf(stderr, "______________________________________________________________\n");
168 }
169 return r600_pipe_shader(ctx, shader);
170 }
171
172 void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
173 {
174 pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
175 r600_bytecode_clear(&shader->shader.bc);
176 }
177
178 /*
179 * tgsi -> r600 shader
180 */
181 struct r600_shader_tgsi_instruction;
182
183 struct r600_shader_src {
184 unsigned sel;
185 unsigned swizzle[4];
186 unsigned neg;
187 unsigned abs;
188 unsigned rel;
189 unsigned kc_bank;
190 uint32_t value[4];
191 };
192
193 struct r600_shader_ctx {
194 struct tgsi_shader_info info;
195 struct tgsi_parse_context parse;
196 const struct tgsi_token *tokens;
197 unsigned type;
198 unsigned file_offset[TGSI_FILE_COUNT];
199 unsigned temp_reg;
200 struct r600_shader_tgsi_instruction *inst_info;
201 struct r600_bytecode *bc;
202 struct r600_shader *shader;
203 struct r600_shader_src src[4];
204 uint32_t *literals;
205 uint32_t nliterals;
206 uint32_t max_driver_temp_used;
207 boolean use_llvm;
208 /* needed for evergreen interpolation */
209 boolean input_centroid;
210 boolean input_linear;
211 boolean input_perspective;
212 int num_interp_gpr;
213 int face_gpr;
214 int colors_used;
215 boolean clip_vertex_write;
216 unsigned cv_output;
217 int fragcoord_input;
218 int native_integers;
219 };
220
221 struct r600_shader_tgsi_instruction {
222 unsigned tgsi_opcode;
223 unsigned is_op3;
224 unsigned op;
225 int (*process)(struct r600_shader_ctx *ctx);
226 };
227
228 static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
229 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
230 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only);
231 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
232 static int tgsi_else(struct r600_shader_ctx *ctx);
233 static int tgsi_endif(struct r600_shader_ctx *ctx);
234 static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
235 static int tgsi_endloop(struct r600_shader_ctx *ctx);
236 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
237
238 /*
239 * bytestream -> r600 shader
240 *
241 * These functions are used to transform the output of the LLVM backend into
242 * struct r600_bytecode.
243 */
244
245 static void r600_bytecode_from_byte_stream(struct r600_shader_ctx *ctx,
246 unsigned char * bytes, unsigned num_bytes);
247
248 #ifdef HAVE_OPENCL
249 int r600_compute_shader_create(struct pipe_context * ctx,
250 LLVMModuleRef mod, struct r600_bytecode * bytecode)
251 {
252 struct r600_context *r600_ctx = (struct r600_context *)ctx;
253 unsigned char * bytes;
254 unsigned byte_count;
255 struct r600_shader_ctx shader_ctx;
256 unsigned dump = 0;
257
258 if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) {
259 dump = 1;
260 }
261
262 r600_llvm_compile(mod, &bytes, &byte_count, r600_ctx->family , dump);
263 shader_ctx.bc = bytecode;
264 r600_bytecode_init(shader_ctx.bc, r600_ctx->chip_class, r600_ctx->family,
265 r600_ctx->screen->msaa_texture_support);
266 shader_ctx.bc->type = TGSI_PROCESSOR_COMPUTE;
267 r600_bytecode_from_byte_stream(&shader_ctx, bytes, byte_count);
268 if (shader_ctx.bc->chip_class == CAYMAN) {
269 cm_bytecode_add_cf_end(shader_ctx.bc);
270 }
271 r600_bytecode_build(shader_ctx.bc);
272 if (dump) {
273 r600_bytecode_dump(shader_ctx.bc);
274 }
275 free(bytes);
276 return 1;
277 }
278
279 #endif /* HAVE_OPENCL */
280
281 static uint32_t i32_from_byte_stream(unsigned char * bytes,
282 unsigned * bytes_read)
283 {
284 unsigned i;
285 uint32_t out = 0;
286 for (i = 0; i < 4; i++) {
287 out |= bytes[(*bytes_read)++] << (8 * i);
288 }
289 return out;
290 }
291
292 static unsigned r600_src_from_byte_stream(unsigned char * bytes,
293 unsigned bytes_read, struct r600_bytecode_alu * alu, unsigned src_idx)
294 {
295 unsigned i;
296 unsigned sel0, sel1;
297 sel0 = bytes[bytes_read++];
298 sel1 = bytes[bytes_read++];
299 alu->src[src_idx].sel = sel0 | (sel1 << 8);
300 alu->src[src_idx].chan = bytes[bytes_read++];
301 alu->src[src_idx].neg = bytes[bytes_read++];
302 alu->src[src_idx].abs = bytes[bytes_read++];
303 alu->src[src_idx].rel = bytes[bytes_read++];
304 alu->src[src_idx].kc_bank = bytes[bytes_read++];
305 for (i = 0; i < 4; i++) {
306 alu->src[src_idx].value |= bytes[bytes_read++] << (i * 8);
307 }
308 return bytes_read;
309 }
310
311 static unsigned r600_alu_from_byte_stream(struct r600_shader_ctx *ctx,
312 unsigned char * bytes, unsigned bytes_read)
313 {
314 unsigned src_idx, src_num;
315 struct r600_bytecode_alu alu;
316 unsigned src_use_sel[3];
317 const struct alu_op_info *alu_op;
318 unsigned src_sel[3] = {};
319 uint32_t word0, word1;
320
321 src_num = bytes[bytes_read++];
322
323 memset(&alu, 0, sizeof(alu));
324 for(src_idx = 0; src_idx < src_num; src_idx++) {
325 unsigned i;
326 src_use_sel[src_idx] = bytes[bytes_read++];
327 for (i = 0; i < 4; i++) {
328 src_sel[src_idx] |= bytes[bytes_read++] << (i * 8);
329 }
330 for (i = 0; i < 4; i++) {
331 alu.src[src_idx].value |= bytes[bytes_read++] << (i * 8);
332 }
333 }
334
335 word0 = i32_from_byte_stream(bytes, &bytes_read);
336 word1 = i32_from_byte_stream(bytes, &bytes_read);
337
338 switch(ctx->bc->chip_class) {
339 default:
340 case R600:
341 r600_bytecode_alu_read(ctx->bc, &alu, word0, word1);
342 break;
343 case R700:
344 case EVERGREEN:
345 case CAYMAN:
346 r700_bytecode_alu_read(ctx->bc, &alu, word0, word1);
347 break;
348 }
349
350 for(src_idx = 0; src_idx < src_num; src_idx++) {
351 if (src_use_sel[src_idx]) {
352 unsigned sel = src_sel[src_idx];
353
354 alu.src[src_idx].chan = sel & 3;
355 sel >>= 2;
356
357 if (sel>=512) { /* constant */
358 sel -= 512;
359 alu.src[src_idx].kc_bank = sel >> 12;
360 alu.src[src_idx].sel = (sel & 4095) + 512;
361 }
362 else {
363 alu.src[src_idx].sel = sel;
364 }
365 }
366 }
367
368 alu_op = r600_isa_alu(alu.op);
369
370 #if HAVE_LLVM < 0x0302
371 if ((alu_op->flags & AF_PRED) && alu_op->src_count == 2) {
372 alu.update_pred = 1;
373 alu.dst.write = 0;
374 alu.src[1].sel = V_SQ_ALU_SRC_0;
375 alu.src[1].chan = 0;
376 alu.last = 1;
377 }
378 #endif
379
380 if (alu_op->flags & AF_MOVA) {
381 ctx->bc->ar_reg = alu.src[0].sel;
382 ctx->bc->ar_chan = alu.src[0].chan;
383 ctx->bc->ar_loaded = 0;
384 return bytes_read;
385 }
386
387 if (alu.execute_mask) {
388 alu.pred_sel = 0;
389 r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
390 } else {
391 r600_bytecode_add_alu(ctx->bc, &alu);
392 }
393
394 /* XXX: Handle other KILL instructions */
395 if (alu_op->flags & AF_KILL) {
396 ctx->shader->uses_kill = 1;
397 /* XXX: This should be enforced in the LLVM backend. */
398 ctx->bc->force_add_cf = 1;
399 }
400 return bytes_read;
401 }
402
403 static void llvm_if(struct r600_shader_ctx *ctx)
404 {
405 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
406 fc_pushlevel(ctx, FC_IF);
407 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
408 }
409
410 static void r600_break_from_byte_stream(struct r600_shader_ctx *ctx)
411 {
412 unsigned opcode = TGSI_OPCODE_BRK;
413 if (ctx->bc->chip_class == CAYMAN)
414 ctx->inst_info = &cm_shader_tgsi_instruction[opcode];
415 else if (ctx->bc->chip_class >= EVERGREEN)
416 ctx->inst_info = &eg_shader_tgsi_instruction[opcode];
417 else
418 ctx->inst_info = &r600_shader_tgsi_instruction[opcode];
419 llvm_if(ctx);
420 tgsi_loop_brk_cont(ctx);
421 tgsi_endif(ctx);
422 }
423
424 static unsigned r600_fc_from_byte_stream(struct r600_shader_ctx *ctx,
425 unsigned char * bytes, unsigned bytes_read)
426 {
427 struct r600_bytecode_alu alu;
428 unsigned inst;
429 memset(&alu, 0, sizeof(alu));
430 bytes_read = r600_src_from_byte_stream(bytes, bytes_read, &alu, 0);
431 inst = bytes[bytes_read++];
432 switch (inst) {
433 case 0: /* IF_PREDICATED */
434 llvm_if(ctx);
435 break;
436 case 1: /* ELSE */
437 tgsi_else(ctx);
438 break;
439 case 2: /* ENDIF */
440 tgsi_endif(ctx);
441 break;
442 case 3: /* BGNLOOP */
443 tgsi_bgnloop(ctx);
444 break;
445 case 4: /* ENDLOOP */
446 tgsi_endloop(ctx);
447 break;
448 case 5: /* PREDICATED_BREAK */
449 r600_break_from_byte_stream(ctx);
450 break;
451 case 6: /* CONTINUE */
452 {
453 unsigned opcode = TGSI_OPCODE_CONT;
454 if (ctx->bc->chip_class == CAYMAN) {
455 ctx->inst_info =
456 &cm_shader_tgsi_instruction[opcode];
457 } else if (ctx->bc->chip_class >= EVERGREEN) {
458 ctx->inst_info =
459 &eg_shader_tgsi_instruction[opcode];
460 } else {
461 ctx->inst_info =
462 &r600_shader_tgsi_instruction[opcode];
463 }
464 tgsi_loop_brk_cont(ctx);
465 }
466 break;
467 }
468
469 return bytes_read;
470 }
471
472 static unsigned r600_tex_from_byte_stream(struct r600_shader_ctx *ctx,
473 unsigned char * bytes, unsigned bytes_read)
474 {
475 struct r600_bytecode_tex tex;
476
477 tex.op = r600_isa_fetch_by_opcode(ctx->bc->isa, bytes[bytes_read++]);
478 tex.resource_id = bytes[bytes_read++];
479 tex.src_gpr = bytes[bytes_read++];
480 tex.src_rel = bytes[bytes_read++];
481 tex.dst_gpr = bytes[bytes_read++];
482 tex.dst_rel = bytes[bytes_read++];
483 tex.dst_sel_x = bytes[bytes_read++];
484 tex.dst_sel_y = bytes[bytes_read++];
485 tex.dst_sel_z = bytes[bytes_read++];
486 tex.dst_sel_w = bytes[bytes_read++];
487 tex.lod_bias = bytes[bytes_read++];
488 tex.coord_type_x = bytes[bytes_read++];
489 tex.coord_type_y = bytes[bytes_read++];
490 tex.coord_type_z = bytes[bytes_read++];
491 tex.coord_type_w = bytes[bytes_read++];
492 tex.offset_x = bytes[bytes_read++];
493 tex.offset_y = bytes[bytes_read++];
494 tex.offset_z = bytes[bytes_read++];
495 tex.sampler_id = bytes[bytes_read++];
496 tex.src_sel_x = bytes[bytes_read++];
497 tex.src_sel_y = bytes[bytes_read++];
498 tex.src_sel_z = bytes[bytes_read++];
499 tex.src_sel_w = bytes[bytes_read++];
500
501 tex.inst_mod = 0;
502
503 r600_bytecode_add_tex(ctx->bc, &tex);
504
505 return bytes_read;
506 }
507
508 static int r600_vtx_from_byte_stream(struct r600_shader_ctx *ctx,
509 unsigned char * bytes, unsigned bytes_read)
510 {
511 struct r600_bytecode_vtx vtx;
512
513 uint32_t word0 = i32_from_byte_stream(bytes, &bytes_read);
514 uint32_t word1 = i32_from_byte_stream(bytes, &bytes_read);
515 uint32_t word2 = i32_from_byte_stream(bytes, &bytes_read);
516
517 memset(&vtx, 0, sizeof(vtx));
518
519 /* WORD0 */
520 vtx.op = r600_isa_fetch_by_opcode(ctx->bc->isa,
521 G_SQ_VTX_WORD0_VTX_INST(word0));
522 vtx.fetch_type = G_SQ_VTX_WORD0_FETCH_TYPE(word0);
523 vtx.buffer_id = G_SQ_VTX_WORD0_BUFFER_ID(word0);
524 vtx.src_gpr = G_SQ_VTX_WORD0_SRC_GPR(word0);
525 vtx.src_sel_x = G_SQ_VTX_WORD0_SRC_SEL_X(word0);
526 vtx.mega_fetch_count = G_SQ_VTX_WORD0_MEGA_FETCH_COUNT(word0);
527
528 /* WORD1 */
529 vtx.dst_gpr = G_SQ_VTX_WORD1_GPR_DST_GPR(word1);
530 vtx.dst_sel_x = G_SQ_VTX_WORD1_DST_SEL_X(word1);
531 vtx.dst_sel_y = G_SQ_VTX_WORD1_DST_SEL_Y(word1);
532 vtx.dst_sel_z = G_SQ_VTX_WORD1_DST_SEL_Z(word1);
533 vtx.dst_sel_w = G_SQ_VTX_WORD1_DST_SEL_W(word1);
534 vtx.use_const_fields = G_SQ_VTX_WORD1_USE_CONST_FIELDS(word1);
535 vtx.data_format = G_SQ_VTX_WORD1_DATA_FORMAT(word1);
536 vtx.num_format_all = G_SQ_VTX_WORD1_NUM_FORMAT_ALL(word1);
537 vtx.format_comp_all = G_SQ_VTX_WORD1_FORMAT_COMP_ALL(word1);
538 vtx.srf_mode_all = G_SQ_VTX_WORD1_SRF_MODE_ALL(word1);
539
540 /* WORD 2*/
541 vtx.offset = G_SQ_VTX_WORD2_OFFSET(word2);
542 vtx.endian = G_SQ_VTX_WORD2_ENDIAN_SWAP(word2);
543
544 if (r600_bytecode_add_vtx(ctx->bc, &vtx)) {
545 fprintf(stderr, "Error adding vtx\n");
546 }
547
548 /* Use the Texture Cache for compute shaders*/
549 if (ctx->bc->chip_class >= EVERGREEN &&
550 ctx->bc->type == TGSI_PROCESSOR_COMPUTE) {
551 ctx->bc->cf_last->op = CF_OP_TEX;
552 }
553 return bytes_read;
554 }
555
556 static int r600_export_from_byte_stream(struct r600_shader_ctx *ctx,
557 unsigned char * bytes, unsigned bytes_read)
558 {
559 uint32_t word0 = 0, word1 = 0;
560 struct r600_bytecode_output output;
561 memset(&output, 0, sizeof(struct r600_bytecode_output));
562 word0 = i32_from_byte_stream(bytes, &bytes_read);
563 word1 = i32_from_byte_stream(bytes, &bytes_read);
564 if (ctx->bc->chip_class >= EVERGREEN)
565 eg_bytecode_export_read(ctx->bc, &output, word0,word1);
566 else
567 r600_bytecode_export_read(ctx->bc, &output, word0,word1);
568 r600_bytecode_add_output(ctx->bc, &output);
569 return bytes_read;
570 }
571
572 static void r600_bytecode_from_byte_stream(struct r600_shader_ctx *ctx,
573 unsigned char * bytes, unsigned num_bytes)
574 {
575 unsigned bytes_read = 0;
576 unsigned i, byte;
577 while (bytes_read < num_bytes) {
578 char inst_type = bytes[bytes_read++];
579 switch (inst_type) {
580 case 0:
581 bytes_read = r600_alu_from_byte_stream(ctx, bytes,
582 bytes_read);
583 break;
584 case 1:
585 bytes_read = r600_tex_from_byte_stream(ctx, bytes,
586 bytes_read);
587 break;
588 case 2:
589 bytes_read = r600_fc_from_byte_stream(ctx, bytes,
590 bytes_read);
591 break;
592 case 3:
593 r600_bytecode_add_cfinst(ctx->bc, CF_NATIVE);
594 for (i = 0; i < 2; i++) {
595 for (byte = 0 ; byte < 4; byte++) {
596 ctx->bc->cf_last->isa[i] |=
597 (bytes[bytes_read++] << (byte * 8));
598 }
599 }
600 break;
601
602 case 4:
603 bytes_read = r600_vtx_from_byte_stream(ctx, bytes,
604 bytes_read);
605 break;
606 case 5:
607 bytes_read = r600_export_from_byte_stream(ctx, bytes,
608 bytes_read);
609 break;
610 default:
611 /* XXX: Error here */
612 break;
613 }
614 }
615 }
616
617 /* End bytestream -> r600 shader functions*/
618
619 static int tgsi_is_supported(struct r600_shader_ctx *ctx)
620 {
621 struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
622 int j;
623
624 if (i->Instruction.NumDstRegs > 1) {
625 R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
626 return -EINVAL;
627 }
628 if (i->Instruction.Predicate) {
629 R600_ERR("predicate unsupported\n");
630 return -EINVAL;
631 }
632 #if 0
633 if (i->Instruction.Label) {
634 R600_ERR("label unsupported\n");
635 return -EINVAL;
636 }
637 #endif
638 for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
639 if (i->Src[j].Register.Dimension) {
640 if (i->Src[j].Register.File != TGSI_FILE_CONSTANT) {
641 R600_ERR("unsupported src %d (dimension %d)\n", j,
642 i->Src[j].Register.Dimension);
643 return -EINVAL;
644 }
645 }
646 }
647 for (j = 0; j < i->Instruction.NumDstRegs; j++) {
648 if (i->Dst[j].Register.Dimension) {
649 R600_ERR("unsupported dst (dimension)\n");
650 return -EINVAL;
651 }
652 }
653 return 0;
654 }
655
656 static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
657 int input)
658 {
659 int ij_index = 0;
660
661 if (ctx->shader->input[input].interpolate == TGSI_INTERPOLATE_PERSPECTIVE) {
662 if (ctx->shader->input[input].centroid)
663 ij_index++;
664 } else if (ctx->shader->input[input].interpolate == TGSI_INTERPOLATE_LINEAR) {
665 /* if we have perspective add one */
666 if (ctx->input_perspective) {
667 ij_index++;
668 /* if we have perspective centroid */
669 if (ctx->input_centroid)
670 ij_index++;
671 }
672 if (ctx->shader->input[input].centroid)
673 ij_index++;
674 }
675
676 ctx->shader->input[input].ij_index = ij_index;
677 }
678
679 static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
680 {
681 int i, r;
682 struct r600_bytecode_alu alu;
683 int gpr = 0, base_chan = 0;
684 int ij_index = ctx->shader->input[input].ij_index;
685
686 /* work out gpr and base_chan from index */
687 gpr = ij_index / 2;
688 base_chan = (2 * (ij_index % 2)) + 1;
689
690 for (i = 0; i < 8; i++) {
691 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
692
693 if (i < 4)
694 alu.op = ALU_OP2_INTERP_ZW;
695 else
696 alu.op = ALU_OP2_INTERP_XY;
697
698 if ((i > 1) && (i < 6)) {
699 alu.dst.sel = ctx->shader->input[input].gpr;
700 alu.dst.write = 1;
701 }
702
703 alu.dst.chan = i % 4;
704
705 alu.src[0].sel = gpr;
706 alu.src[0].chan = (base_chan - (i % 2));
707
708 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
709
710 alu.bank_swizzle_force = SQ_ALU_VEC_210;
711 if ((i % 4) == 3)
712 alu.last = 1;
713 r = r600_bytecode_add_alu(ctx->bc, &alu);
714 if (r)
715 return r;
716 }
717 return 0;
718 }
719
720 static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
721 {
722 int i, r;
723 struct r600_bytecode_alu alu;
724
725 for (i = 0; i < 4; i++) {
726 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
727
728 alu.op = ALU_OP1_INTERP_LOAD_P0;
729
730 alu.dst.sel = ctx->shader->input[input].gpr;
731 alu.dst.write = 1;
732
733 alu.dst.chan = i;
734
735 alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
736 alu.src[0].chan = i;
737
738 if (i == 3)
739 alu.last = 1;
740 r = r600_bytecode_add_alu(ctx->bc, &alu);
741 if (r)
742 return r;
743 }
744 return 0;
745 }
746
747 /*
748 * Special export handling in shaders
749 *
750 * shader export ARRAY_BASE for EXPORT_POS:
751 * 60 is position
752 * 61 is misc vector
753 * 62, 63 are clip distance vectors
754 *
755 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
756 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
757 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
758 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
759 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
760 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
761 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
762 * exclusive from render target index)
763 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
764 *
765 *
766 * shader export ARRAY_BASE for EXPORT_PIXEL:
767 * 0-7 CB targets
768 * 61 computed Z vector
769 *
770 * The use of the values exported in the computed Z vector are controlled
771 * by DB_SHADER_CONTROL:
772 * Z_EXPORT_ENABLE - Z as a float in RED
773 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
774 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
775 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
776 * DB_SOURCE_FORMAT - export control restrictions
777 *
778 */
779
780
781 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
782 static int r600_spi_sid(struct r600_shader_io * io)
783 {
784 int index, name = io->name;
785
786 /* These params are handled differently, they don't need
787 * semantic indices, so we'll use 0 for them.
788 */
789 if (name == TGSI_SEMANTIC_POSITION ||
790 name == TGSI_SEMANTIC_PSIZE ||
791 name == TGSI_SEMANTIC_FACE)
792 index = 0;
793 else {
794 if (name == TGSI_SEMANTIC_GENERIC) {
795 /* For generic params simply use sid from tgsi */
796 index = io->sid;
797 } else {
798 /* For non-generic params - pack name and sid into 8 bits */
799 index = 0x80 | (name<<3) | (io->sid);
800 }
801
802 /* Make sure that all really used indices have nonzero value, so
803 * we can just compare it to 0 later instead of comparing the name
804 * with different values to detect special cases. */
805 index++;
806 }
807
808 return index;
809 };
810
811 /* turn input into interpolate on EG */
812 static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
813 {
814 int r = 0;
815
816 if (ctx->shader->input[index].spi_sid) {
817 ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
818 if (ctx->shader->input[index].interpolate > 0) {
819 evergreen_interp_assign_ij_index(ctx, index);
820 if (!ctx->use_llvm)
821 r = evergreen_interp_alu(ctx, index);
822 } else {
823 if (!ctx->use_llvm)
824 r = evergreen_interp_flat(ctx, index);
825 }
826 }
827 return r;
828 }
829
830 static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
831 {
832 struct r600_bytecode_alu alu;
833 int i, r;
834 int gpr_front = ctx->shader->input[front].gpr;
835 int gpr_back = ctx->shader->input[back].gpr;
836
837 for (i = 0; i < 4; i++) {
838 memset(&alu, 0, sizeof(alu));
839 alu.op = ALU_OP3_CNDGT;
840 alu.is_op3 = 1;
841 alu.dst.write = 1;
842 alu.dst.sel = gpr_front;
843 alu.src[0].sel = ctx->face_gpr;
844 alu.src[1].sel = gpr_front;
845 alu.src[2].sel = gpr_back;
846
847 alu.dst.chan = i;
848 alu.src[1].chan = i;
849 alu.src[2].chan = i;
850 alu.last = (i==3);
851
852 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
853 return r;
854 }
855
856 return 0;
857 }
858
859 static int tgsi_declaration(struct r600_shader_ctx *ctx)
860 {
861 struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
862 unsigned i;
863 int r;
864
865 switch (d->Declaration.File) {
866 case TGSI_FILE_INPUT:
867 i = ctx->shader->ninput++;
868 ctx->shader->input[i].name = d->Semantic.Name;
869 ctx->shader->input[i].sid = d->Semantic.Index;
870 ctx->shader->input[i].interpolate = d->Interp.Interpolate;
871 ctx->shader->input[i].centroid = d->Interp.Centroid;
872 ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First;
873 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
874 ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
875 switch (ctx->shader->input[i].name) {
876 case TGSI_SEMANTIC_FACE:
877 ctx->face_gpr = ctx->shader->input[i].gpr;
878 break;
879 case TGSI_SEMANTIC_COLOR:
880 ctx->colors_used++;
881 break;
882 case TGSI_SEMANTIC_POSITION:
883 ctx->fragcoord_input = i;
884 break;
885 }
886 if (ctx->bc->chip_class >= EVERGREEN) {
887 if ((r = evergreen_interp_input(ctx, i)))
888 return r;
889 }
890 }
891 break;
892 case TGSI_FILE_OUTPUT:
893 i = ctx->shader->noutput++;
894 ctx->shader->output[i].name = d->Semantic.Name;
895 ctx->shader->output[i].sid = d->Semantic.Index;
896 ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First;
897 ctx->shader->output[i].interpolate = d->Interp.Interpolate;
898 ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
899 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
900 ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
901 switch (d->Semantic.Name) {
902 case TGSI_SEMANTIC_CLIPDIST:
903 ctx->shader->clip_dist_write |= d->Declaration.UsageMask << (d->Semantic.Index << 2);
904 break;
905 case TGSI_SEMANTIC_PSIZE:
906 ctx->shader->vs_out_misc_write = 1;
907 ctx->shader->vs_out_point_size = 1;
908 break;
909 case TGSI_SEMANTIC_CLIPVERTEX:
910 ctx->clip_vertex_write = TRUE;
911 ctx->cv_output = i;
912 break;
913 }
914 } else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
915 switch (d->Semantic.Name) {
916 case TGSI_SEMANTIC_COLOR:
917 ctx->shader->nr_ps_max_color_exports++;
918 break;
919 }
920 }
921 break;
922 case TGSI_FILE_CONSTANT:
923 case TGSI_FILE_TEMPORARY:
924 case TGSI_FILE_SAMPLER:
925 case TGSI_FILE_ADDRESS:
926 break;
927
928 case TGSI_FILE_SYSTEM_VALUE:
929 if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
930 if (!ctx->native_integers) {
931 struct r600_bytecode_alu alu;
932 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
933
934 alu.op = ALU_OP1_INT_TO_FLT;
935 alu.src[0].sel = 0;
936 alu.src[0].chan = 3;
937
938 alu.dst.sel = 0;
939 alu.dst.chan = 3;
940 alu.dst.write = 1;
941 alu.last = 1;
942
943 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
944 return r;
945 }
946 break;
947 } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
948 break;
949 default:
950 R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
951 return -EINVAL;
952 }
953 return 0;
954 }
955
956 static int r600_get_temp(struct r600_shader_ctx *ctx)
957 {
958 return ctx->temp_reg + ctx->max_driver_temp_used++;
959 }
960
961 /*
962 * for evergreen we need to scan the shader to find the number of GPRs we need to
963 * reserve for interpolation.
964 *
965 * we need to know if we are going to emit
966 * any centroid inputs
967 * if perspective and linear are required
968 */
969 static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
970 {
971 int i;
972 int num_baryc;
973
974 ctx->input_linear = FALSE;
975 ctx->input_perspective = FALSE;
976 ctx->input_centroid = FALSE;
977 ctx->num_interp_gpr = 1;
978
979 /* any centroid inputs */
980 for (i = 0; i < ctx->info.num_inputs; i++) {
981 /* skip position/face */
982 if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
983 ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE)
984 continue;
985 if (ctx->info.input_interpolate[i] == TGSI_INTERPOLATE_LINEAR)
986 ctx->input_linear = TRUE;
987 if (ctx->info.input_interpolate[i] == TGSI_INTERPOLATE_PERSPECTIVE)
988 ctx->input_perspective = TRUE;
989 if (ctx->info.input_centroid[i])
990 ctx->input_centroid = TRUE;
991 }
992
993 num_baryc = 0;
994 /* ignoring sample for now */
995 if (ctx->input_perspective)
996 num_baryc++;
997 if (ctx->input_linear)
998 num_baryc++;
999 if (ctx->input_centroid)
1000 num_baryc *= 2;
1001
1002 ctx->num_interp_gpr += (num_baryc + 1) >> 1;
1003
1004 /* XXX PULL MODEL and LINE STIPPLE, FIXED PT POS */
1005 return ctx->num_interp_gpr;
1006 }
1007
1008 static void tgsi_src(struct r600_shader_ctx *ctx,
1009 const struct tgsi_full_src_register *tgsi_src,
1010 struct r600_shader_src *r600_src)
1011 {
1012 memset(r600_src, 0, sizeof(*r600_src));
1013 r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
1014 r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
1015 r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
1016 r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
1017 r600_src->neg = tgsi_src->Register.Negate;
1018 r600_src->abs = tgsi_src->Register.Absolute;
1019
1020 if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
1021 int index;
1022 if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
1023 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
1024 (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
1025
1026 index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
1027 r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
1028 if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
1029 return;
1030 }
1031 index = tgsi_src->Register.Index;
1032 r600_src->sel = V_SQ_ALU_SRC_LITERAL;
1033 memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
1034 } else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
1035 if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
1036 r600_src->swizzle[0] = 3;
1037 r600_src->swizzle[1] = 3;
1038 r600_src->swizzle[2] = 3;
1039 r600_src->swizzle[3] = 3;
1040 r600_src->sel = 0;
1041 } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
1042 r600_src->swizzle[0] = 0;
1043 r600_src->swizzle[1] = 0;
1044 r600_src->swizzle[2] = 0;
1045 r600_src->swizzle[3] = 0;
1046 r600_src->sel = 0;
1047 }
1048 } else {
1049 if (tgsi_src->Register.Indirect)
1050 r600_src->rel = V_SQ_REL_RELATIVE;
1051 r600_src->sel = tgsi_src->Register.Index;
1052 r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
1053 }
1054 if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
1055 if (tgsi_src->Register.Dimension) {
1056 r600_src->kc_bank = tgsi_src->Dimension.Index;
1057 }
1058 }
1059 }
1060
1061 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx, unsigned int cb_idx, unsigned int offset, unsigned int dst_reg)
1062 {
1063 struct r600_bytecode_vtx vtx;
1064 unsigned int ar_reg;
1065 int r;
1066
1067 if (offset) {
1068 struct r600_bytecode_alu alu;
1069
1070 memset(&alu, 0, sizeof(alu));
1071
1072 alu.op = ALU_OP2_ADD_INT;
1073 alu.src[0].sel = ctx->bc->ar_reg;
1074
1075 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1076 alu.src[1].value = offset;
1077
1078 alu.dst.sel = dst_reg;
1079 alu.dst.write = 1;
1080 alu.last = 1;
1081
1082 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
1083 return r;
1084
1085 ar_reg = dst_reg;
1086 } else {
1087 ar_reg = ctx->bc->ar_reg;
1088 }
1089
1090 memset(&vtx, 0, sizeof(vtx));
1091 vtx.buffer_id = cb_idx;
1092 vtx.fetch_type = 2; /* VTX_FETCH_NO_INDEX_OFFSET */
1093 vtx.src_gpr = ar_reg;
1094 vtx.mega_fetch_count = 16;
1095 vtx.dst_gpr = dst_reg;
1096 vtx.dst_sel_x = 0; /* SEL_X */
1097 vtx.dst_sel_y = 1; /* SEL_Y */
1098 vtx.dst_sel_z = 2; /* SEL_Z */
1099 vtx.dst_sel_w = 3; /* SEL_W */
1100 vtx.data_format = FMT_32_32_32_32_FLOAT;
1101 vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
1102 vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
1103 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
1104 vtx.endian = r600_endian_swap(32);
1105
1106 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1107 return r;
1108
1109 return 0;
1110 }
1111
1112 static int tgsi_split_constant(struct r600_shader_ctx *ctx)
1113 {
1114 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1115 struct r600_bytecode_alu alu;
1116 int i, j, k, nconst, r;
1117
1118 for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
1119 if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
1120 nconst++;
1121 }
1122 tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
1123 }
1124 for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
1125 if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
1126 continue;
1127 }
1128
1129 if (ctx->src[i].rel) {
1130 int treg = r600_get_temp(ctx);
1131 if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].sel - 512, treg)))
1132 return r;
1133
1134 ctx->src[i].kc_bank = 0;
1135 ctx->src[i].sel = treg;
1136 ctx->src[i].rel = 0;
1137 j--;
1138 } else if (j > 0) {
1139 int treg = r600_get_temp(ctx);
1140 for (k = 0; k < 4; k++) {
1141 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1142 alu.op = ALU_OP1_MOV;
1143 alu.src[0].sel = ctx->src[i].sel;
1144 alu.src[0].chan = k;
1145 alu.src[0].rel = ctx->src[i].rel;
1146 alu.dst.sel = treg;
1147 alu.dst.chan = k;
1148 alu.dst.write = 1;
1149 if (k == 3)
1150 alu.last = 1;
1151 r = r600_bytecode_add_alu(ctx->bc, &alu);
1152 if (r)
1153 return r;
1154 }
1155 ctx->src[i].sel = treg;
1156 ctx->src[i].rel =0;
1157 j--;
1158 }
1159 }
1160 return 0;
1161 }
1162
1163 /* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
1164 static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
1165 {
1166 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1167 struct r600_bytecode_alu alu;
1168 int i, j, k, nliteral, r;
1169
1170 for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
1171 if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1172 nliteral++;
1173 }
1174 }
1175 for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
1176 if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
1177 int treg = r600_get_temp(ctx);
1178 for (k = 0; k < 4; k++) {
1179 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1180 alu.op = ALU_OP1_MOV;
1181 alu.src[0].sel = ctx->src[i].sel;
1182 alu.src[0].chan = k;
1183 alu.src[0].value = ctx->src[i].value[k];
1184 alu.dst.sel = treg;
1185 alu.dst.chan = k;
1186 alu.dst.write = 1;
1187 if (k == 3)
1188 alu.last = 1;
1189 r = r600_bytecode_add_alu(ctx->bc, &alu);
1190 if (r)
1191 return r;
1192 }
1193 ctx->src[i].sel = treg;
1194 j--;
1195 }
1196 }
1197 return 0;
1198 }
1199
1200 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
1201 {
1202 int i, r, count = ctx->shader->ninput;
1203
1204 for (i = 0; i < count; i++) {
1205 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
1206 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
1207 if (r)
1208 return r;
1209 }
1210 }
1211 return 0;
1212 }
1213
/* Translate a TGSI shader into r600 bytecode.
 *
 * Walks the TGSI token stream twice: a first pass gathers immediates,
 * declarations and properties and sets up the GPR layout in
 * ctx.file_offset[]; a second pass (non-LLVM path) emits bytecode per
 * instruction through the per-chip opcode tables.  Afterwards it lowers a
 * CLIPVERTEX write into clip-distance DOT4s, emits stream-output writes,
 * and builds the position/param/pixel exports (adding fake exports where
 * the hardware requires at least one of a kind).  When built with
 * R600_USE_LLVM and R600_LLVM=1, instruction selection is delegated to
 * the LLVM backend and its byte stream is imported instead.
 *
 * Returns 0 on success or a negative errno-style error code.
 */
static int r600_shader_from_tgsi(struct r600_screen *rscreen,
			struct r600_pipe_shader *pipeshader,
			struct r600_shader_key key)
{
	struct r600_shader *shader = &pipeshader->shader;
	struct tgsi_token *tokens = pipeshader->selector->tokens;
	/* local copy: the stream-output lowering below mutates start_component */
	struct pipe_stream_output_info so = pipeshader->selector->so;
	struct tgsi_full_immediate *immediate;
	struct tgsi_full_property *property;
	struct r600_shader_ctx ctx;
	struct r600_bytecode_output output[32];
	unsigned output_done, noutput;
	unsigned opcode;
	int i, j, k, r = 0;
	/* export array_base allocators: pixel exports start at 0, position
	 * exports at 60, generic params at 0 */
	int next_pixel_base = 0, next_pos_base = 60, next_param_base = 0;
	/* Declarations used by llvm code */
	bool use_llvm = false;
	unsigned char * inst_bytes = NULL;
	unsigned inst_byte_count = 0;

#ifdef R600_USE_LLVM
	use_llvm = debug_get_bool_option("R600_LLVM", TRUE);
#endif
	ctx.bc = &shader->bc;
	ctx.shader = shader;
	ctx.native_integers = true;

	r600_bytecode_init(ctx.bc, rscreen->chip_class, rscreen->family,
			   rscreen->msaa_texture_support);
	ctx.tokens = tokens;
	tgsi_scan_shader(tokens, &ctx.info);
	tgsi_parse_init(&ctx.parse, tokens);
	ctx.type = ctx.parse.FullHeader.Processor.Processor;
	shader->processor_type = ctx.type;
	ctx.bc->type = shader->processor_type;

	ctx.face_gpr = -1;
	ctx.fragcoord_input = -1;
	ctx.colors_used = 0;
	ctx.clip_vertex_write = 0;

	shader->nr_ps_color_exports = 0;
	shader->nr_ps_max_color_exports = 0;

	shader->two_side = key.color_two_side;

	/* register allocations */
	/* Values [0,127] correspond to GPR[0..127].
	 * Values [128,159] correspond to constant buffer bank 0
	 * Values [160,191] correspond to constant buffer bank 1
	 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
	 * Values [256,287] correspond to constant buffer bank 2 (EG)
	 * Values [288,319] correspond to constant buffer bank 3 (EG)
	 * Other special values are shown in the list below.
	 * 244  ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
	 * 245  ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
	 * 246  ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
	 * 247  ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
	 * 248	SQ_ALU_SRC_0: special constant 0.0.
	 * 249	SQ_ALU_SRC_1: special constant 1.0 float.
	 * 250	SQ_ALU_SRC_1_INT: special constant 1 integer.
	 * 251	SQ_ALU_SRC_M_1_INT: special constant -1 integer.
	 * 252	SQ_ALU_SRC_0_5: special constant 0.5 float.
	 * 253	SQ_ALU_SRC_LITERAL: literal constant.
	 * 254	SQ_ALU_SRC_PV: previous vector result.
	 * 255	SQ_ALU_SRC_PS: previous scalar result.
	 */
	for (i = 0; i < TGSI_FILE_COUNT; i++) {
		ctx.file_offset[i] = 0;
	}
	if (ctx.type == TGSI_PROCESSOR_VERTEX) {
		ctx.file_offset[TGSI_FILE_INPUT] = 1;
		r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
	}
	if (ctx.type == TGSI_PROCESSOR_FRAGMENT && ctx.bc->chip_class >= EVERGREEN) {
		ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
	}

#ifdef R600_USE_LLVM
	if (use_llvm && ctx.info.indirect_files && (ctx.info.indirect_files & (1 << TGSI_FILE_CONSTANT)) != ctx.info.indirect_files) {
		fprintf(stderr, "Warning: R600 LLVM backend does not support "
				"indirect adressing. Falling back to TGSI "
				"backend.\n");
		use_llvm = 0;
	}
#endif
	ctx.use_llvm = use_llvm;

	if (use_llvm) {
		ctx.file_offset[TGSI_FILE_OUTPUT] =
			ctx.file_offset[TGSI_FILE_INPUT];
	} else {
		ctx.file_offset[TGSI_FILE_OUTPUT] =
			ctx.file_offset[TGSI_FILE_INPUT] +
			ctx.info.file_max[TGSI_FILE_INPUT] + 1;
	}
	ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
						ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;

	/* Outside the GPR range. This will be translated to one of the
	 * kcache banks later. */
	ctx.file_offset[TGSI_FILE_CONSTANT] = 512;

	ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
	ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
			ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
	ctx.temp_reg = ctx.bc->ar_reg + 1;

	/* First pass: collect immediates into the literal pool, process
	 * declarations and shader properties. */
	ctx.nliterals = 0;
	ctx.literals = NULL;
	shader->fs_write_all = FALSE;
	while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
		tgsi_parse_token(&ctx.parse);
		switch (ctx.parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			immediate = &ctx.parse.FullToken.FullImmediate;
			/* NOTE(review): the pointer is overwritten before the
			 * NULL check, so on realloc failure the old literal
			 * buffer leaks - verify and consider a temp pointer. */
			ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
			if(ctx.literals == NULL) {
				r = -ENOMEM;
				goto out_err;
			}
			ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
			ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
			ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
			ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
			ctx.nliterals++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
			r = tgsi_declaration(&ctx);
			if (r)
				goto out_err;
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
			/* instructions are handled in the second pass below */
			break;
		case TGSI_TOKEN_TYPE_PROPERTY:
			property = &ctx.parse.FullToken.FullProperty;
			switch (property->Property.PropertyName) {
			case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
				if (property->u[0].Data == 1)
					shader->fs_write_all = TRUE;
				break;
			case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
				/* we don't need this one */
				break;
			}
			break;
		default:
			R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
			r = -EINVAL;
			goto out_err;
		}
	}

	/* Process two side if needed */
	if (shader->two_side && ctx.colors_used) {
		int i, count = ctx.shader->ninput;
		unsigned next_lds_loc = ctx.shader->nlds;

		/* additional inputs will be allocated right after the existing inputs,
		 * we won't need them after the color selection, so we don't need to
		 * reserve these gprs for the rest of the shader code and to adjust
		 * output offsets etc. */
		int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
				ctx.info.file_max[TGSI_FILE_INPUT] + 1;

		/* face selection needs a face register; synthesize one if the
		 * shader didn't declare FACE */
		if (ctx.face_gpr == -1) {
			i = ctx.shader->ninput++;
			ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
			ctx.shader->input[i].spi_sid = 0;
			ctx.shader->input[i].gpr = gpr++;
			ctx.face_gpr = ctx.shader->input[i].gpr;
		}

		/* clone every COLOR input as a BCOLOR input */
		for (i = 0; i < count; i++) {
			if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
				int ni = ctx.shader->ninput++;
				memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
				ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
				ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
				ctx.shader->input[ni].gpr = gpr++;
				// TGSI to LLVM needs to know the lds position of inputs.
				// Non LLVM path computes it later (in process_twoside_color)
				ctx.shader->input[ni].lds_pos = next_lds_loc++;
				ctx.shader->input[i].back_color_input = ni;
				if (ctx.bc->chip_class >= EVERGREEN) {
					if ((r = evergreen_interp_input(&ctx, ni)))
						return r;
				}
			}
		}
	}

	/* LLVM backend setup */
#ifdef R600_USE_LLVM
	if (use_llvm) {
		struct radeon_llvm_context radeon_llvm_ctx;
		LLVMModuleRef mod;
		unsigned dump = 0;
		memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
		radeon_llvm_ctx.type = ctx.type;
		radeon_llvm_ctx.two_side = shader->two_side;
		radeon_llvm_ctx.face_gpr = ctx.face_gpr;
		radeon_llvm_ctx.r600_inputs = ctx.shader->input;
		radeon_llvm_ctx.r600_outputs = ctx.shader->output;
		radeon_llvm_ctx.color_buffer_count = MAX2(key.nr_cbufs , 1);
		radeon_llvm_ctx.chip_class = ctx.bc->chip_class;
		radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->chip_class >= EVERGREEN);
		radeon_llvm_ctx.stream_outputs = &so;
		radeon_llvm_ctx.clip_vertex = ctx.cv_output;
		mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
		if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) {
			dump = 1;
		}
		if (r600_llvm_compile(mod, &inst_bytes, &inst_byte_count,
							rscreen->family, dump)) {
			FREE(inst_bytes);
			/* NOTE(review): radeon_llvm_dispose is also called
			 * unconditionally after this if/else, so the failure
			 * path disposes the context twice - confirm safe. */
			radeon_llvm_dispose(&radeon_llvm_ctx);
			use_llvm = 0;
			fprintf(stderr, "R600 LLVM backend failed to compile "
				"shader. Falling back to TGSI\n");
		} else {
			ctx.file_offset[TGSI_FILE_OUTPUT] =
				ctx.file_offset[TGSI_FILE_INPUT];
		}
		radeon_llvm_dispose(&radeon_llvm_ctx);
	}
#endif
	/* End of LLVM backend setup */

	if (shader->fs_write_all && rscreen->chip_class >= EVERGREEN)
		shader->nr_ps_max_color_exports = 8;

	if (!use_llvm) {
		/* Replace fragcoord.w by its reciprocal in place
		 * (presumably the hw delivers 1/w there - TODO confirm). */
		if (ctx.fragcoord_input >= 0) {
			if (ctx.bc->chip_class == CAYMAN) {
				/* Cayman: RECIP_IEEE runs in all vector slots;
				 * only the W-slot result is written back. */
				for (j = 0 ; j < 4; j++) {
					struct r600_bytecode_alu alu;
					memset(&alu, 0, sizeof(struct r600_bytecode_alu));
					alu.op = ALU_OP1_RECIP_IEEE;
					alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
					alu.src[0].chan = 3;

					alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
					alu.dst.chan = j;
					alu.dst.write = (j == 3);
					alu.last = 1;
					if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
						return r;
				}
			} else {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_RECIP_IEEE;
				alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
				alu.src[0].chan = 3;

				alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
				alu.dst.chan = 3;
				alu.dst.write = 1;
				alu.last = 1;
				if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
					return r;
			}
		}

		if (shader->two_side && ctx.colors_used) {
			if ((r = process_twoside_color_inputs(&ctx)))
				return r;
		}

		/* Second pass: translate each TGSI instruction through the
		 * per-chip opcode table. */
		tgsi_parse_init(&ctx.parse, tokens);
		while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
			tgsi_parse_token(&ctx.parse);
			switch (ctx.parse.FullToken.Token.Type) {
			case TGSI_TOKEN_TYPE_INSTRUCTION:
				r = tgsi_is_supported(&ctx);
				if (r)
					goto out_err;
				ctx.max_driver_temp_used = 0;
				/* reserve first tmp for everyone */
				r600_get_temp(&ctx);

				opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
				if ((r = tgsi_split_constant(&ctx)))
					goto out_err;
				if ((r = tgsi_split_literal_constant(&ctx)))
					goto out_err;
				if (ctx.bc->chip_class == CAYMAN)
					ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
				else if (ctx.bc->chip_class >= EVERGREEN)
					ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
				else
					ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
				r = ctx.inst_info->process(&ctx);
				if (r)
					goto out_err;
				break;
			default:
				break;
			}
		}
	}

	/* Reset the temporary register counter. */
	ctx.max_driver_temp_used = 0;

	/* Get instructions if we are using the LLVM backend. */
	if (use_llvm) {
		r600_bytecode_from_byte_stream(&ctx, inst_bytes, inst_byte_count);
		FREE(inst_bytes);
	}

	noutput = shader->noutput;

	if (ctx.clip_vertex_write) {
		unsigned clipdist_temp[2];

		clipdist_temp[0] = r600_get_temp(&ctx);
		clipdist_temp[1] = r600_get_temp(&ctx);

		/* need to convert a clipvertex write into clipdistance writes and not export
		   the clip vertex anymore */

		memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
		shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
		shader->output[noutput].gpr = clipdist_temp[0];
		noutput++;
		shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
		shader->output[noutput].gpr = clipdist_temp[1];
		noutput++;

		/* reset spi_sid for clipvertex output to avoid confusing spi */
		shader->output[ctx.cv_output].spi_sid = 0;

		shader->clip_dist_write = 0xFF;

		/* each of the 8 clip distances is clipvertex . ucp[i], done
		 * with a DOT4 whose write mask selects lane i&3 */
		for (i = 0; i < 8; i++) {
			int oreg = i >> 2;
			int ochan = i & 3;

			for (j = 0; j < 4; j++) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP2_DOT4;
				alu.src[0].sel = shader->output[ctx.cv_output].gpr;
				alu.src[0].chan = j;

				alu.src[1].sel = 512 + i;
				alu.src[1].kc_bank = R600_UCP_CONST_BUFFER;
				alu.src[1].chan = j;

				alu.dst.sel = clipdist_temp[oreg];
				alu.dst.chan = j;
				alu.dst.write = (j == ochan);
				if (j == 3)
					alu.last = 1;
				if (!use_llvm)
					r = r600_bytecode_add_alu(ctx.bc, &alu);
				if (r)
					return r;
			}
		}
	}

	/* Add stream outputs. */
	if (ctx.type == TGSI_PROCESSOR_VERTEX && so.num_outputs && !use_llvm) {
		unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];

		/* Sanity checking. */
		if (so.num_outputs > PIPE_MAX_SHADER_OUTPUTS) {
			R600_ERR("Too many stream outputs: %d\n", so.num_outputs);
			r = -EINVAL;
			goto out_err;
		}
		for (i = 0; i < so.num_outputs; i++) {
			if (so.output[i].output_buffer >= 4) {
				R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
					 so.output[i].output_buffer);
				r = -EINVAL;
				goto out_err;
			}
		}

		/* Initialize locations where the outputs are stored. */
		for (i = 0; i < so.num_outputs; i++) {
			so_gpr[i] = shader->output[so.output[i].register_index].gpr;

			/* Lower outputs with dst_offset < start_component.
			 *
			 * We can only output 4D vectors with a write mask, e.g. we can
			 * only output the W component at offset 3, etc. If we want
			 * to store Y, Z, or W at buffer offset 0, we need to use MOV
			 * to move it to X and output X. */
			if (so.output[i].dst_offset < so.output[i].start_component) {
				unsigned tmp = r600_get_temp(&ctx);

				for (j = 0; j < so.output[i].num_components; j++) {
					struct r600_bytecode_alu alu;
					memset(&alu, 0, sizeof(struct r600_bytecode_alu));
					alu.op = ALU_OP1_MOV;
					alu.src[0].sel = so_gpr[i];
					alu.src[0].chan = so.output[i].start_component + j;

					alu.dst.sel = tmp;
					alu.dst.chan = j;
					alu.dst.write = 1;
					if (j == so.output[i].num_components - 1)
						alu.last = 1;
					r = r600_bytecode_add_alu(ctx.bc, &alu);
					if (r)
						return r;
				}
				so.output[i].start_component = 0;
				so_gpr[i] = tmp;
			}
		}

		/* Write outputs to buffers. */
		for (i = 0; i < so.num_outputs; i++) {
			struct r600_bytecode_output output;

			memset(&output, 0, sizeof(struct r600_bytecode_output));
			output.gpr = so_gpr[i];
			output.elem_size = so.output[i].num_components;
			output.array_base = so.output[i].dst_offset - so.output[i].start_component;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
			output.burst_count = 1;
			output.barrier = 1;
			/* array_size is an upper limit for the burst_count
			 * with MEM_STREAM instructions */
			output.array_size = 0xFFF;
			output.comp_mask = ((1 << so.output[i].num_components) - 1) << so.output[i].start_component;
			/* EG encodes the buffer in the op; r600 has one op per
			 * stream buffer */
			if (ctx.bc->chip_class >= EVERGREEN) {
				switch (so.output[i].output_buffer) {
				case 0:
					output.op = CF_OP_MEM_STREAM0_BUF0;
					break;
				case 1:
					output.op = CF_OP_MEM_STREAM0_BUF1;
					break;
				case 2:
					output.op = CF_OP_MEM_STREAM0_BUF2;
					break;
				case 3:
					output.op = CF_OP_MEM_STREAM0_BUF3;
					break;
				}
			} else {
				switch (so.output[i].output_buffer) {
				case 0:
					output.op = CF_OP_MEM_STREAM0;
					break;
				case 1:
					output.op = CF_OP_MEM_STREAM1;
					break;
				case 2:
					output.op = CF_OP_MEM_STREAM2;
					break;
				case 3:
					output.op = CF_OP_MEM_STREAM3;
					break;
				}
			}
			r = r600_bytecode_add_output(ctx.bc, &output);
			if (r)
				goto out_err;
		}
	}

	/* export output */
	for (i = 0, j = 0; i < noutput; i++, j++) {
		memset(&output[j], 0, sizeof(struct r600_bytecode_output));
		output[j].gpr = shader->output[i].gpr;
		output[j].elem_size = 3;
		output[j].swizzle_x = 0;
		output[j].swizzle_y = 1;
		output[j].swizzle_z = 2;
		output[j].swizzle_w = 3;
		output[j].burst_count = 1;
		output[j].barrier = 1;
		/* type stays -1 for anything that falls through to the generic
		 * PARAM export below */
		output[j].type = -1;
		output[j].op = CF_OP_EXPORT;
		switch (ctx.type) {
		case TGSI_PROCESSOR_VERTEX:
			switch (shader->output[i].name) {
			case TGSI_SEMANTIC_POSITION:
				output[j].array_base = next_pos_base++;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
				break;

			case TGSI_SEMANTIC_PSIZE:
				output[j].array_base = next_pos_base++;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				/* lowered to clip distances above - drop it */
				j--;
				break;
			case TGSI_SEMANTIC_CLIPDIST:
				output[j].array_base = next_pos_base++;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
				/* spi_sid is 0 for clipdistance outputs that were generated
				 * for clipvertex - we don't need to pass them to PS */
				if (shader->output[i].spi_sid) {
					j++;
					/* duplicate it as PARAM to pass to the pixel shader */
					memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
					output[j].array_base = next_param_base++;
					output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
				}
				break;
			case TGSI_SEMANTIC_FOG:
				output[j].swizzle_y = 4; /* 0 */
				output[j].swizzle_z = 4; /* 0 */
				output[j].swizzle_w = 5; /* 1 */
				break;
			}
			break;
		case TGSI_PROCESSOR_FRAGMENT:
			if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
				/* never export more colors than the number of CBs */
				if (next_pixel_base && next_pixel_base >= key.nr_cbufs) {
					/* skip export */
					j--;
					continue;
				}
				output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
				output[j].array_base = next_pixel_base++;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
				shader->nr_ps_color_exports++;
				/* replicate color 0 to every bound CB when
				 * FS_COLOR0_WRITES_ALL_CBUFS is set (EG+) */
				if (shader->fs_write_all && (rscreen->chip_class >= EVERGREEN)) {
					for (k = 1; k < key.nr_cbufs; k++) {
						j++;
						memset(&output[j], 0, sizeof(struct r600_bytecode_output));
						output[j].gpr = shader->output[i].gpr;
						output[j].elem_size = 3;
						output[j].swizzle_x = 0;
						output[j].swizzle_y = 1;
						output[j].swizzle_z = 2;
						output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
						output[j].burst_count = 1;
						output[j].barrier = 1;
						output[j].array_base = next_pixel_base++;
						output[j].op = CF_OP_EXPORT;
						output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
						shader->nr_ps_color_exports++;
					}
				}
			} else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
				/* depth goes to array_base 61, Z in the X slot */
				output[j].array_base = 61;
				output[j].swizzle_x = 2;
				output[j].swizzle_y = 7;
				output[j].swizzle_z = output[j].swizzle_w = 7;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
			} else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
				/* stencil goes to array_base 61, Y slot */
				output[j].array_base = 61;
				output[j].swizzle_x = 7;
				output[j].swizzle_y = 1;
				output[j].swizzle_z = output[j].swizzle_w = 7;
				output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
			} else {
				R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
				r = -EINVAL;
				goto out_err;
			}
			break;
		default:
			R600_ERR("unsupported processor type %d\n", ctx.type);
			r = -EINVAL;
			goto out_err;
		}

		if (output[j].type==-1) {
			output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
			output[j].array_base = next_param_base++;
		}
	}

	/* add fake position export */
	if (ctx.type == TGSI_PROCESSOR_VERTEX && next_pos_base == 60) {
		memset(&output[j], 0, sizeof(struct r600_bytecode_output));
		output[j].gpr = 0;
		output[j].elem_size = 3;
		output[j].swizzle_x = 7;
		output[j].swizzle_y = 7;
		output[j].swizzle_z = 7;
		output[j].swizzle_w = 7;
		output[j].burst_count = 1;
		output[j].barrier = 1;
		output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
		output[j].array_base = next_pos_base;
		output[j].op = CF_OP_EXPORT;
		j++;
	}

	/* add fake param output for vertex shader if no param is exported */
	if (ctx.type == TGSI_PROCESSOR_VERTEX && next_param_base == 0) {
		memset(&output[j], 0, sizeof(struct r600_bytecode_output));
		output[j].gpr = 0;
		output[j].elem_size = 3;
		output[j].swizzle_x = 7;
		output[j].swizzle_y = 7;
		output[j].swizzle_z = 7;
		output[j].swizzle_w = 7;
		output[j].burst_count = 1;
		output[j].barrier = 1;
		output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		output[j].array_base = 0;
		output[j].op = CF_OP_EXPORT;
		j++;
	}

	/* add fake pixel export */
	if (ctx.type == TGSI_PROCESSOR_FRAGMENT && next_pixel_base == 0) {
		memset(&output[j], 0, sizeof(struct r600_bytecode_output));
		output[j].gpr = 0;
		output[j].elem_size = 3;
		output[j].swizzle_x = 7;
		output[j].swizzle_y = 7;
		output[j].swizzle_z = 7;
		output[j].swizzle_w = 7;
		output[j].burst_count = 1;
		output[j].barrier = 1;
		output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
		output[j].array_base = 0;
		output[j].op = CF_OP_EXPORT;
		j++;
	}

	noutput = j;

	/* set export done on last export of each type */
	for (i = noutput - 1, output_done = 0; i >= 0; i--) {
		if (ctx.bc->chip_class < CAYMAN) {
			if (i == (noutput - 1)) {
				output[i].end_of_program = 1;
			}
		}
		if (!(output_done & (1 << output[i].type))) {
			output_done |= (1 << output[i].type);
			output[i].op = CF_OP_EXPORT_DONE;
		}
	}
	/* add output to bytecode */
	if (!use_llvm) {
		for (i = 0; i < noutput; i++) {
			r = r600_bytecode_add_output(ctx.bc, &output[i]);
			if (r)
				goto out_err;
		}
	}
	/* add program end */
	if (ctx.bc->chip_class == CAYMAN)
		cm_bytecode_add_cf_end(ctx.bc);

	/* check GPR limit - we have 124 = 128 - 4
	 * (4 are reserved as alu clause temporary registers) */
	if (ctx.bc->ngpr > 124) {
		R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
		r = -ENOMEM;
		goto out_err;
	}

	free(ctx.literals);
	tgsi_parse_free(&ctx.parse);
	return 0;
out_err:
	free(ctx.literals);
	tgsi_parse_free(&ctx.parse);
	return r;
}
1884
1885 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
1886 {
1887 R600_ERR("%s tgsi opcode unsupported\n",
1888 tgsi_get_opcode_name(ctx->inst_info->tgsi_opcode));
1889 return -EINVAL;
1890 }
1891
/* Handler for TGSI_OPCODE_END: nothing to emit here, program termination
 * is handled by the export/CF-end logic in r600_shader_from_tgsi. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	(void)ctx;
	return 0;
}
1896
1897 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
1898 const struct r600_shader_src *shader_src,
1899 unsigned chan)
1900 {
1901 bc_src->sel = shader_src->sel;
1902 bc_src->chan = shader_src->swizzle[chan];
1903 bc_src->neg = shader_src->neg;
1904 bc_src->abs = shader_src->abs;
1905 bc_src->rel = shader_src->rel;
1906 bc_src->value = shader_src->value[bc_src->chan];
1907 bc_src->kc_bank = shader_src->kc_bank;
1908 }
1909
1910 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
1911 {
1912 bc_src->abs = 1;
1913 bc_src->neg = 0;
1914 }
1915
1916 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
1917 {
1918 bc_src->neg = !bc_src->neg;
1919 }
1920
1921 static void tgsi_dst(struct r600_shader_ctx *ctx,
1922 const struct tgsi_full_dst_register *tgsi_dst,
1923 unsigned swizzle,
1924 struct r600_bytecode_alu_dst *r600_dst)
1925 {
1926 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1927
1928 r600_dst->sel = tgsi_dst->Register.Index;
1929 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
1930 r600_dst->chan = swizzle;
1931 r600_dst->write = 1;
1932 if (tgsi_dst->Register.Indirect)
1933 r600_dst->rel = V_SQ_REL_RELATIVE;
1934 if (inst->Instruction.Saturate) {
1935 r600_dst->clamp = 1;
1936 }
1937 }
1938
/* Return the index of the highest set channel in a 4-bit writemask, or 0
 * for an empty mask - identifies which per-channel ALU op must carry the
 * 'last' bit. */
static int tgsi_last_instruction(unsigned writemask)
{
	int i;

	for (i = 3; i >= 0; i--) {
		if (writemask & (1u << i))
			return i;
	}
	return 0;
}
1950
1951 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
1952 {
1953 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1954 struct r600_bytecode_alu alu;
1955 int i, j, r;
1956 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
1957
1958 for (i = 0; i < lasti + 1; i++) {
1959 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
1960 continue;
1961
1962 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1963 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
1964
1965 alu.op = ctx->inst_info->op;
1966 if (!swap) {
1967 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
1968 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
1969 }
1970 } else {
1971 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
1972 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
1973 }
1974 /* handle some special cases */
1975 switch (ctx->inst_info->tgsi_opcode) {
1976 case TGSI_OPCODE_SUB:
1977 r600_bytecode_src_toggle_neg(&alu.src[1]);
1978 break;
1979 case TGSI_OPCODE_ABS:
1980 r600_bytecode_src_set_abs(&alu.src[0]);
1981 break;
1982 default:
1983 break;
1984 }
1985 if (i == lasti || trans_only) {
1986 alu.last = 1;
1987 }
1988 r = r600_bytecode_add_alu(ctx->bc, &alu);
1989 if (r)
1990 return r;
1991 }
1992 return 0;
1993 }
1994
/* Plain per-channel two-operand op: natural operand order, vector slots. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, /*swap=*/0, /*trans_only=*/0);
}
1999
/* Two-operand op with the hardware source order reversed. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, /*swap=*/1, /*trans_only=*/0);
}
2004
/* Two-operand op where every channel's instruction must end its own
 * ALU group (trans-slot-only opcodes). */
static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, /*swap=*/0, /*trans_only=*/1);
}
2009
2010 static int tgsi_ineg(struct r600_shader_ctx *ctx)
2011 {
2012 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2013 struct r600_bytecode_alu alu;
2014 int i, r;
2015 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2016
2017 for (i = 0; i < lasti + 1; i++) {
2018
2019 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2020 continue;
2021 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2022 alu.op = ctx->inst_info->op;
2023
2024 alu.src[0].sel = V_SQ_ALU_SRC_0;
2025
2026 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2027
2028 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2029
2030 if (i == lasti) {
2031 alu.last = 1;
2032 }
2033 r = r600_bytecode_add_alu(ctx->bc, &alu);
2034 if (r)
2035 return r;
2036 }
2037 return 0;
2038
2039 }
2040
2041 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
2042 {
2043 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2044 int i, j, r;
2045 struct r600_bytecode_alu alu;
2046 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
2047
2048 for (i = 0 ; i < last_slot; i++) {
2049 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2050 alu.op = ctx->inst_info->op;
2051 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2052 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
2053
2054 /* RSQ should take the absolute value of src */
2055 if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_RSQ) {
2056 r600_bytecode_src_set_abs(&alu.src[j]);
2057 }
2058 }
2059 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2060 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
2061
2062 if (i == last_slot - 1)
2063 alu.last = 1;
2064 r = r600_bytecode_add_alu(ctx->bc, &alu);
2065 if (r)
2066 return r;
2067 }
2068 return 0;
2069 }
2070
/* Emit an integer multiply on Cayman.  These ops must occupy all four
 * vector slots; for each enabled destination channel k we emit the
 * whole 4-slot group reading source channel k, and only the slot whose
 * index matches k stores its result. */
static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int i, j, k, r;
	struct r600_bytecode_alu alu;
	int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
	for (k = 0; k < last_slot; k++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
			continue;

		for (i = 0 ; i < 4; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ctx->inst_info->op;
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
			}
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			/* only the slot matching the current channel writes */
			alu.dst.write = (i == k);
			if (i == 3)
				alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}
	return 0;
}
2098
2099 /*
2100 * r600 - trunc to -PI..PI range
2101 * r700 - normalize by dividing by 2PI
2102 * see fdo bug 27901
2103 */
2104 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
2105 {
2106 static float half_inv_pi = 1.0 /(3.1415926535 * 2);
2107 static float double_pi = 3.1415926535 * 2;
2108 static float neg_pi = -3.1415926535;
2109
2110 int r;
2111 struct r600_bytecode_alu alu;
2112
2113 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2114 alu.op = ALU_OP3_MULADD;
2115 alu.is_op3 = 1;
2116
2117 alu.dst.chan = 0;
2118 alu.dst.sel = ctx->temp_reg;
2119 alu.dst.write = 1;
2120
2121 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
2122
2123 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2124 alu.src[1].chan = 0;
2125 alu.src[1].value = *(uint32_t *)&half_inv_pi;
2126 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
2127 alu.src[2].chan = 0;
2128 alu.last = 1;
2129 r = r600_bytecode_add_alu(ctx->bc, &alu);
2130 if (r)
2131 return r;
2132
2133 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2134 alu.op = ALU_OP1_FRACT;
2135
2136 alu.dst.chan = 0;
2137 alu.dst.sel = ctx->temp_reg;
2138 alu.dst.write = 1;
2139
2140 alu.src[0].sel = ctx->temp_reg;
2141 alu.src[0].chan = 0;
2142 alu.last = 1;
2143 r = r600_bytecode_add_alu(ctx->bc, &alu);
2144 if (r)
2145 return r;
2146
2147 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2148 alu.op = ALU_OP3_MULADD;
2149 alu.is_op3 = 1;
2150
2151 alu.dst.chan = 0;
2152 alu.dst.sel = ctx->temp_reg;
2153 alu.dst.write = 1;
2154
2155 alu.src[0].sel = ctx->temp_reg;
2156 alu.src[0].chan = 0;
2157
2158 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2159 alu.src[1].chan = 0;
2160 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
2161 alu.src[2].chan = 0;
2162
2163 if (ctx->bc->chip_class == R600) {
2164 alu.src[1].value = *(uint32_t *)&double_pi;
2165 alu.src[2].value = *(uint32_t *)&neg_pi;
2166 } else {
2167 alu.src[1].sel = V_SQ_ALU_SRC_1;
2168 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
2169 alu.src[2].neg = 1;
2170 }
2171
2172 alu.last = 1;
2173 r = r600_bytecode_add_alu(ctx->bc, &alu);
2174 if (r)
2175 return r;
2176 return 0;
2177 }
2178
2179 static int cayman_trig(struct r600_shader_ctx *ctx)
2180 {
2181 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2182 struct r600_bytecode_alu alu;
2183 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
2184 int i, r;
2185
2186 r = tgsi_setup_trig(ctx);
2187 if (r)
2188 return r;
2189
2190
2191 for (i = 0; i < last_slot; i++) {
2192 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2193 alu.op = ctx->inst_info->op;
2194 alu.dst.chan = i;
2195
2196 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2197 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
2198
2199 alu.src[0].sel = ctx->temp_reg;
2200 alu.src[0].chan = 0;
2201 if (i == last_slot - 1)
2202 alu.last = 1;
2203 r = r600_bytecode_add_alu(ctx->bc, &alu);
2204 if (r)
2205 return r;
2206 }
2207 return 0;
2208 }
2209
2210 static int tgsi_trig(struct r600_shader_ctx *ctx)
2211 {
2212 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2213 struct r600_bytecode_alu alu;
2214 int i, r;
2215 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2216
2217 r = tgsi_setup_trig(ctx);
2218 if (r)
2219 return r;
2220
2221 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2222 alu.op = ctx->inst_info->op;
2223 alu.dst.chan = 0;
2224 alu.dst.sel = ctx->temp_reg;
2225 alu.dst.write = 1;
2226
2227 alu.src[0].sel = ctx->temp_reg;
2228 alu.src[0].chan = 0;
2229 alu.last = 1;
2230 r = r600_bytecode_add_alu(ctx->bc, &alu);
2231 if (r)
2232 return r;
2233
2234 /* replicate result */
2235 for (i = 0; i < lasti + 1; i++) {
2236 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2237 continue;
2238
2239 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2240 alu.op = ALU_OP1_MOV;
2241
2242 alu.src[0].sel = ctx->temp_reg;
2243 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2244 if (i == lasti)
2245 alu.last = 1;
2246 r = r600_bytecode_add_alu(ctx->bc, &alu);
2247 if (r)
2248 return r;
2249 }
2250 return 0;
2251 }
2252
/* SCS: dst = (cos(src.x), sin(src.x), 0.0, 1.0), with each component
 * only emitted when its write-mask bit is set.  Cayman needs the
 * 3-slot replicated form of SIN/COS; only the slot matching the
 * destination channel stores its result. */
static int tgsi_scs(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, r;

	/* We'll only need the trig stuff if we are going to write to the
	 * X or Y components of the destination vector.
	 */
	if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
		r = tgsi_setup_trig(ctx);
		if (r)
			return r;
	}

	/* dst.x = COS */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0 ; i < 3; i++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_COS;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

				/* only slot 0 (the X channel) stores */
				if (i == 0)
					alu.dst.write = 1;
				else
					alu.dst.write = 0;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				if (i == 2)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_COS;
			tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);

			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.y = SIN */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0 ; i < 3; i++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_SIN;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
				/* only slot 1 (the Y channel) stores */
				if (i == 1)
					alu.dst.write = 1;
				else
					alu.dst.write = 0;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				if (i == 2)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_SIN;
			tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);

			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.z = 0.0; */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_MOV;

		tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);

		alu.src[0].sel = V_SQ_ALU_SRC_0;
		alu.src[0].chan = 0;

		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* dst.w = 1.0; */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_MOV;

		tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);

		alu.src[0].sel = V_SQ_ALU_SRC_1;
		alu.src[0].chan = 0;

		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}
2373
2374 static int tgsi_kill(struct r600_shader_ctx *ctx)
2375 {
2376 struct r600_bytecode_alu alu;
2377 int i, r;
2378
2379 for (i = 0; i < 4; i++) {
2380 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2381 alu.op = ctx->inst_info->op;
2382
2383 alu.dst.chan = i;
2384
2385 alu.src[0].sel = V_SQ_ALU_SRC_0;
2386
2387 if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_KILP) {
2388 alu.src[1].sel = V_SQ_ALU_SRC_1;
2389 alu.src[1].neg = 1;
2390 } else {
2391 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2392 }
2393 if (i == 3) {
2394 alu.last = 1;
2395 }
2396 r = r600_bytecode_add_alu(ctx->bc, &alu);
2397 if (r)
2398 return r;
2399 }
2400
2401 /* kill must be last in ALU */
2402 ctx->bc->force_add_cf = 1;
2403 ctx->shader->uses_kill = TRUE;
2404 return 0;
2405 }
2406
/* LIT: dst = (1.0, max(src.x, 0), spec, 1.0).  The z (specular) term
 * is built from LOG_CLAMPED / MUL_LIT / EXP_IEEE and is only computed
 * when the Z channel is actually written.
 * NOTE(review): the MUL_LIT operands suggest spec = exp(src.w *
 * log(max(src.y, 0))) with MUL_LIT handling the LIT clamping rules —
 * confirm against the ISA's MUL_LIT definition. */
static int tgsi_lit(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r;

	/* tmp.x = max(src.y, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_MAX;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
	alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
	alu.src[1].chan = 1;

	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 0;
	alu.dst.write = 1;

	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	if (inst->Dst[0].Register.WriteMask & (1 << 2))
	{
		int chan;
		int sel;
		int i;

		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0; i < 3; i++) {
				/* tmp.z = log(tmp.x) */
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_LOG_CLAMPED;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				alu.dst.sel = ctx->temp_reg;
				alu.dst.chan = i;
				if (i == 2) {
					alu.dst.write = 1;
					alu.last = 1;
				} else
					alu.dst.write = 0;

				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			/* tmp.z = log(tmp.x) */
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_LOG_CLAMPED;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = 2;
			alu.dst.write = 1;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}

		/* carry the log's destination forward: it differs per chip
		 * class (chan 2 above in both paths, but keep it generic) */
		chan = alu.dst.chan;
		sel = alu.dst.sel;

		/* tmp.x = amd MUL_LIT(tmp.z, src.w, src.x ) */
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP3_MUL_LIT;
		alu.src[0].sel = sel;
		alu.src[0].chan = chan;
		r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
		r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 0;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0; i < 3; i++) {
				/* dst.z = exp(tmp.x) */
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_EXP_IEEE;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
				if (i == 2) {
					alu.dst.write = 1;
					alu.last = 1;
				} else
					alu.dst.write = 0;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			/* dst.z = exp(tmp.x) */
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_EXP_IEEE;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.x, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
	alu.src[0].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.y = max(src.x, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_MAX;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
	alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
	alu.src[1].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.w, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1;
	alu.src[0].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	return 0;
}
2556
2557 static int tgsi_rsq(struct r600_shader_ctx *ctx)
2558 {
2559 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2560 struct r600_bytecode_alu alu;
2561 int i, r;
2562
2563 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2564
2565 /* XXX:
2566 * For state trackers other than OpenGL, we'll want to use
2567 * _RECIPSQRT_IEEE instead.
2568 */
2569 alu.op = ALU_OP1_RECIPSQRT_CLAMPED;
2570
2571 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
2572 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
2573 r600_bytecode_src_set_abs(&alu.src[i]);
2574 }
2575 alu.dst.sel = ctx->temp_reg;
2576 alu.dst.write = 1;
2577 alu.last = 1;
2578 r = r600_bytecode_add_alu(ctx->bc, &alu);
2579 if (r)
2580 return r;
2581 /* replicate result */
2582 return tgsi_helper_tempx_replicate(ctx);
2583 }
2584
2585 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
2586 {
2587 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2588 struct r600_bytecode_alu alu;
2589 int i, r;
2590
2591 for (i = 0; i < 4; i++) {
2592 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2593 alu.src[0].sel = ctx->temp_reg;
2594 alu.op = ALU_OP1_MOV;
2595 alu.dst.chan = i;
2596 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2597 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
2598 if (i == 3)
2599 alu.last = 1;
2600 r = r600_bytecode_add_alu(ctx->bc, &alu);
2601 if (r)
2602 return r;
2603 }
2604 return 0;
2605 }
2606
2607 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
2608 {
2609 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2610 struct r600_bytecode_alu alu;
2611 int i, r;
2612
2613 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2614 alu.op = ctx->inst_info->op;
2615 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
2616 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
2617 }
2618 alu.dst.sel = ctx->temp_reg;
2619 alu.dst.write = 1;
2620 alu.last = 1;
2621 r = r600_bytecode_add_alu(ctx->bc, &alu);
2622 if (r)
2623 return r;
2624 /* replicate result */
2625 return tgsi_helper_tempx_replicate(ctx);
2626 }
2627
2628 static int cayman_pow(struct r600_shader_ctx *ctx)
2629 {
2630 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2631 int i, r;
2632 struct r600_bytecode_alu alu;
2633 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
2634
2635 for (i = 0; i < 3; i++) {
2636 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2637 alu.op = ALU_OP1_LOG_IEEE;
2638 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
2639 alu.dst.sel = ctx->temp_reg;
2640 alu.dst.chan = i;
2641 alu.dst.write = 1;
2642 if (i == 2)
2643 alu.last = 1;
2644 r = r600_bytecode_add_alu(ctx->bc, &alu);
2645 if (r)
2646 return r;
2647 }
2648
2649 /* b * LOG2(a) */
2650 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2651 alu.op = ALU_OP2_MUL;
2652 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
2653 alu.src[1].sel = ctx->temp_reg;
2654 alu.dst.sel = ctx->temp_reg;
2655 alu.dst.write = 1;
2656 alu.last = 1;
2657 r = r600_bytecode_add_alu(ctx->bc, &alu);
2658 if (r)
2659 return r;
2660
2661 for (i = 0; i < last_slot; i++) {
2662 /* POW(a,b) = EXP2(b * LOG2(a))*/
2663 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2664 alu.op = ALU_OP1_EXP_IEEE;
2665 alu.src[0].sel = ctx->temp_reg;
2666
2667 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2668 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
2669 if (i == last_slot - 1)
2670 alu.last = 1;
2671 r = r600_bytecode_add_alu(ctx->bc, &alu);
2672 if (r)
2673 return r;
2674 }
2675 return 0;
2676 }
2677
2678 static int tgsi_pow(struct r600_shader_ctx *ctx)
2679 {
2680 struct r600_bytecode_alu alu;
2681 int r;
2682
2683 /* LOG2(a) */
2684 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2685 alu.op = ALU_OP1_LOG_IEEE;
2686 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
2687 alu.dst.sel = ctx->temp_reg;
2688 alu.dst.write = 1;
2689 alu.last = 1;
2690 r = r600_bytecode_add_alu(ctx->bc, &alu);
2691 if (r)
2692 return r;
2693 /* b * LOG2(a) */
2694 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2695 alu.op = ALU_OP2_MUL;
2696 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
2697 alu.src[1].sel = ctx->temp_reg;
2698 alu.dst.sel = ctx->temp_reg;
2699 alu.dst.write = 1;
2700 alu.last = 1;
2701 r = r600_bytecode_add_alu(ctx->bc, &alu);
2702 if (r)
2703 return r;
2704 /* POW(a,b) = EXP2(b * LOG2(a))*/
2705 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2706 alu.op = ALU_OP1_EXP_IEEE;
2707 alu.src[0].sel = ctx->temp_reg;
2708 alu.dst.sel = ctx->temp_reg;
2709 alu.dst.write = 1;
2710 alu.last = 1;
2711 r = r600_bytecode_add_alu(ctx->bc, &alu);
2712 if (r)
2713 return r;
2714 return tgsi_helper_tempx_replicate(ctx);
2715 }
2716
2717 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
2718 {
2719 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2720 struct r600_bytecode_alu alu;
2721 int i, r, j;
2722 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2723 int tmp0 = ctx->temp_reg;
2724 int tmp1 = r600_get_temp(ctx);
2725 int tmp2 = r600_get_temp(ctx);
2726 int tmp3 = r600_get_temp(ctx);
2727 /* Unsigned path:
2728 *
2729 * we need to represent src1 as src2*q + r, where q - quotient, r - remainder
2730 *
2731 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
2732 * 2. tmp0.z = lo (tmp0.x * src2)
2733 * 3. tmp0.w = -tmp0.z
2734 * 4. tmp0.y = hi (tmp0.x * src2)
2735 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
2736 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
2737 * 7. tmp1.x = tmp0.x - tmp0.w
2738 * 8. tmp1.y = tmp0.x + tmp0.w
2739 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
2740 * 10. tmp0.z = hi(tmp0.x * src1) = q
2741 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
2742 *
2743 * 12. tmp0.w = src1 - tmp0.y = r
2744 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
2745 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
2746 *
2747 * if DIV
2748 *
2749 * 15. tmp1.z = tmp0.z + 1 = q + 1
2750 * 16. tmp1.w = tmp0.z - 1 = q - 1
2751 *
2752 * else MOD
2753 *
2754 * 15. tmp1.z = tmp0.w - src2 = r - src2
2755 * 16. tmp1.w = tmp0.w + src2 = r + src2
2756 *
2757 * endif
2758 *
2759 * 17. tmp1.x = tmp1.x & tmp1.y
2760 *
2761 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
2762 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
2763 *
2764 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
2765 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
2766 *
2767 * Signed path:
2768 *
2769 * Same as unsigned, using abs values of the operands,
2770 * and fixing the sign of the result in the end.
2771 */
2772
2773 for (i = 0; i < 4; i++) {
2774 if (!(write_mask & (1<<i)))
2775 continue;
2776
2777 if (signed_op) {
2778
2779 /* tmp2.x = -src0 */
2780 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2781 alu.op = ALU_OP2_SUB_INT;
2782
2783 alu.dst.sel = tmp2;
2784 alu.dst.chan = 0;
2785 alu.dst.write = 1;
2786
2787 alu.src[0].sel = V_SQ_ALU_SRC_0;
2788
2789 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2790
2791 alu.last = 1;
2792 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2793 return r;
2794
2795 /* tmp2.y = -src1 */
2796 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2797 alu.op = ALU_OP2_SUB_INT;
2798
2799 alu.dst.sel = tmp2;
2800 alu.dst.chan = 1;
2801 alu.dst.write = 1;
2802
2803 alu.src[0].sel = V_SQ_ALU_SRC_0;
2804
2805 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
2806
2807 alu.last = 1;
2808 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2809 return r;
2810
2811 /* tmp2.z sign bit is set if src0 and src2 signs are different */
2812 /* it will be a sign of the quotient */
2813 if (!mod) {
2814
2815 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2816 alu.op = ALU_OP2_XOR_INT;
2817
2818 alu.dst.sel = tmp2;
2819 alu.dst.chan = 2;
2820 alu.dst.write = 1;
2821
2822 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2823 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
2824
2825 alu.last = 1;
2826 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2827 return r;
2828 }
2829
2830 /* tmp2.x = |src0| */
2831 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2832 alu.op = ALU_OP3_CNDGE_INT;
2833 alu.is_op3 = 1;
2834
2835 alu.dst.sel = tmp2;
2836 alu.dst.chan = 0;
2837 alu.dst.write = 1;
2838
2839 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2840 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2841 alu.src[2].sel = tmp2;
2842 alu.src[2].chan = 0;
2843
2844 alu.last = 1;
2845 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2846 return r;
2847
2848 /* tmp2.y = |src1| */
2849 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2850 alu.op = ALU_OP3_CNDGE_INT;
2851 alu.is_op3 = 1;
2852
2853 alu.dst.sel = tmp2;
2854 alu.dst.chan = 1;
2855 alu.dst.write = 1;
2856
2857 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
2858 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
2859 alu.src[2].sel = tmp2;
2860 alu.src[2].chan = 1;
2861
2862 alu.last = 1;
2863 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2864 return r;
2865
2866 }
2867
2868 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
2869 if (ctx->bc->chip_class == CAYMAN) {
2870 /* tmp3.x = u2f(src2) */
2871 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2872 alu.op = ALU_OP1_UINT_TO_FLT;
2873
2874 alu.dst.sel = tmp3;
2875 alu.dst.chan = 0;
2876 alu.dst.write = 1;
2877
2878 if (signed_op) {
2879 alu.src[0].sel = tmp2;
2880 alu.src[0].chan = 1;
2881 } else {
2882 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
2883 }
2884
2885 alu.last = 1;
2886 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2887 return r;
2888
2889 /* tmp0.x = recip(tmp3.x) */
2890 for (j = 0 ; j < 3; j++) {
2891 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2892 alu.op = ALU_OP1_RECIP_IEEE;
2893
2894 alu.dst.sel = tmp0;
2895 alu.dst.chan = j;
2896 alu.dst.write = (j == 0);
2897
2898 alu.src[0].sel = tmp3;
2899 alu.src[0].chan = 0;
2900
2901 if (j == 2)
2902 alu.last = 1;
2903 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2904 return r;
2905 }
2906
2907 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2908 alu.op = ALU_OP2_MUL;
2909
2910 alu.src[0].sel = tmp0;
2911 alu.src[0].chan = 0;
2912
2913 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
2914 alu.src[1].value = 0x4f800000;
2915
2916 alu.dst.sel = tmp3;
2917 alu.dst.write = 1;
2918 alu.last = 1;
2919 r = r600_bytecode_add_alu(ctx->bc, &alu);
2920 if (r)
2921 return r;
2922
2923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2924 alu.op = ALU_OP1_FLT_TO_UINT;
2925
2926 alu.dst.sel = tmp0;
2927 alu.dst.chan = 0;
2928 alu.dst.write = 1;
2929
2930 alu.src[0].sel = tmp3;
2931 alu.src[0].chan = 0;
2932
2933 alu.last = 1;
2934 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2935 return r;
2936
2937 } else {
2938 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2939 alu.op = ALU_OP1_RECIP_UINT;
2940
2941 alu.dst.sel = tmp0;
2942 alu.dst.chan = 0;
2943 alu.dst.write = 1;
2944
2945 if (signed_op) {
2946 alu.src[0].sel = tmp2;
2947 alu.src[0].chan = 1;
2948 } else {
2949 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
2950 }
2951
2952 alu.last = 1;
2953 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2954 return r;
2955 }
2956
2957 /* 2. tmp0.z = lo (tmp0.x * src2) */
2958 if (ctx->bc->chip_class == CAYMAN) {
2959 for (j = 0 ; j < 4; j++) {
2960 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2961 alu.op = ALU_OP2_MULLO_UINT;
2962
2963 alu.dst.sel = tmp0;
2964 alu.dst.chan = j;
2965 alu.dst.write = (j == 2);
2966
2967 alu.src[0].sel = tmp0;
2968 alu.src[0].chan = 0;
2969 if (signed_op) {
2970 alu.src[1].sel = tmp2;
2971 alu.src[1].chan = 1;
2972 } else {
2973 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
2974 }
2975
2976 alu.last = (j == 3);
2977 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2978 return r;
2979 }
2980 } else {
2981 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2982 alu.op = ALU_OP2_MULLO_UINT;
2983
2984 alu.dst.sel = tmp0;
2985 alu.dst.chan = 2;
2986 alu.dst.write = 1;
2987
2988 alu.src[0].sel = tmp0;
2989 alu.src[0].chan = 0;
2990 if (signed_op) {
2991 alu.src[1].sel = tmp2;
2992 alu.src[1].chan = 1;
2993 } else {
2994 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
2995 }
2996
2997 alu.last = 1;
2998 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
2999 return r;
3000 }
3001
3002 /* 3. tmp0.w = -tmp0.z */
3003 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3004 alu.op = ALU_OP2_SUB_INT;
3005
3006 alu.dst.sel = tmp0;
3007 alu.dst.chan = 3;
3008 alu.dst.write = 1;
3009
3010 alu.src[0].sel = V_SQ_ALU_SRC_0;
3011 alu.src[1].sel = tmp0;
3012 alu.src[1].chan = 2;
3013
3014 alu.last = 1;
3015 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3016 return r;
3017
3018 /* 4. tmp0.y = hi (tmp0.x * src2) */
3019 if (ctx->bc->chip_class == CAYMAN) {
3020 for (j = 0 ; j < 4; j++) {
3021 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3022 alu.op = ALU_OP2_MULHI_UINT;
3023
3024 alu.dst.sel = tmp0;
3025 alu.dst.chan = j;
3026 alu.dst.write = (j == 1);
3027
3028 alu.src[0].sel = tmp0;
3029 alu.src[0].chan = 0;
3030
3031 if (signed_op) {
3032 alu.src[1].sel = tmp2;
3033 alu.src[1].chan = 1;
3034 } else {
3035 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
3036 }
3037 alu.last = (j == 3);
3038 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3039 return r;
3040 }
3041 } else {
3042 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3043 alu.op = ALU_OP2_MULHI_UINT;
3044
3045 alu.dst.sel = tmp0;
3046 alu.dst.chan = 1;
3047 alu.dst.write = 1;
3048
3049 alu.src[0].sel = tmp0;
3050 alu.src[0].chan = 0;
3051
3052 if (signed_op) {
3053 alu.src[1].sel = tmp2;
3054 alu.src[1].chan = 1;
3055 } else {
3056 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
3057 }
3058
3059 alu.last = 1;
3060 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3061 return r;
3062 }
3063
3064 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */
3065 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3066 alu.op = ALU_OP3_CNDE_INT;
3067 alu.is_op3 = 1;
3068
3069 alu.dst.sel = tmp0;
3070 alu.dst.chan = 2;
3071 alu.dst.write = 1;
3072
3073 alu.src[0].sel = tmp0;
3074 alu.src[0].chan = 1;
3075 alu.src[1].sel = tmp0;
3076 alu.src[1].chan = 3;
3077 alu.src[2].sel = tmp0;
3078 alu.src[2].chan = 2;
3079
3080 alu.last = 1;
3081 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3082 return r;
3083
3084 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
3085 if (ctx->bc->chip_class == CAYMAN) {
3086 for (j = 0 ; j < 4; j++) {
3087 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3088 alu.op = ALU_OP2_MULHI_UINT;
3089
3090 alu.dst.sel = tmp0;
3091 alu.dst.chan = j;
3092 alu.dst.write = (j == 3);
3093
3094 alu.src[0].sel = tmp0;
3095 alu.src[0].chan = 2;
3096
3097 alu.src[1].sel = tmp0;
3098 alu.src[1].chan = 0;
3099
3100 alu.last = (j == 3);
3101 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3102 return r;
3103 }
3104 } else {
3105 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3106 alu.op = ALU_OP2_MULHI_UINT;
3107
3108 alu.dst.sel = tmp0;
3109 alu.dst.chan = 3;
3110 alu.dst.write = 1;
3111
3112 alu.src[0].sel = tmp0;
3113 alu.src[0].chan = 2;
3114
3115 alu.src[1].sel = tmp0;
3116 alu.src[1].chan = 0;
3117
3118 alu.last = 1;
3119 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3120 return r;
3121 }
3122
3123 /* 7. tmp1.x = tmp0.x - tmp0.w */
3124 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3125 alu.op = ALU_OP2_SUB_INT;
3126
3127 alu.dst.sel = tmp1;
3128 alu.dst.chan = 0;
3129 alu.dst.write = 1;
3130
3131 alu.src[0].sel = tmp0;
3132 alu.src[0].chan = 0;
3133 alu.src[1].sel = tmp0;
3134 alu.src[1].chan = 3;
3135
3136 alu.last = 1;
3137 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3138 return r;
3139
3140 /* 8. tmp1.y = tmp0.x + tmp0.w */
3141 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3142 alu.op = ALU_OP2_ADD_INT;
3143
3144 alu.dst.sel = tmp1;
3145 alu.dst.chan = 1;
3146 alu.dst.write = 1;
3147
3148 alu.src[0].sel = tmp0;
3149 alu.src[0].chan = 0;
3150 alu.src[1].sel = tmp0;
3151 alu.src[1].chan = 3;
3152
3153 alu.last = 1;
3154 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3155 return r;
3156
3157 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
3158 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3159 alu.op = ALU_OP3_CNDE_INT;
3160 alu.is_op3 = 1;
3161
3162 alu.dst.sel = tmp0;
3163 alu.dst.chan = 0;
3164 alu.dst.write = 1;
3165
3166 alu.src[0].sel = tmp0;
3167 alu.src[0].chan = 1;
3168 alu.src[1].sel = tmp1;
3169 alu.src[1].chan = 1;
3170 alu.src[2].sel = tmp1;
3171 alu.src[2].chan = 0;
3172
3173 alu.last = 1;
3174 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3175 return r;
3176
3177 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
3178 if (ctx->bc->chip_class == CAYMAN) {
3179 for (j = 0 ; j < 4; j++) {
3180 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3181 alu.op = ALU_OP2_MULHI_UINT;
3182
3183 alu.dst.sel = tmp0;
3184 alu.dst.chan = j;
3185 alu.dst.write = (j == 2);
3186
3187 alu.src[0].sel = tmp0;
3188 alu.src[0].chan = 0;
3189
3190 if (signed_op) {
3191 alu.src[1].sel = tmp2;
3192 alu.src[1].chan = 0;
3193 } else {
3194 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3195 }
3196
3197 alu.last = (j == 3);
3198 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3199 return r;
3200 }
3201 } else {
3202 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3203 alu.op = ALU_OP2_MULHI_UINT;
3204
3205 alu.dst.sel = tmp0;
3206 alu.dst.chan = 2;
3207 alu.dst.write = 1;
3208
3209 alu.src[0].sel = tmp0;
3210 alu.src[0].chan = 0;
3211
3212 if (signed_op) {
3213 alu.src[1].sel = tmp2;
3214 alu.src[1].chan = 0;
3215 } else {
3216 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3217 }
3218
3219 alu.last = 1;
3220 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3221 return r;
3222 }
3223
3224 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
3225 if (ctx->bc->chip_class == CAYMAN) {
3226 for (j = 0 ; j < 4; j++) {
3227 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3228 alu.op = ALU_OP2_MULLO_UINT;
3229
3230 alu.dst.sel = tmp0;
3231 alu.dst.chan = j;
3232 alu.dst.write = (j == 1);
3233
3234 if (signed_op) {
3235 alu.src[0].sel = tmp2;
3236 alu.src[0].chan = 1;
3237 } else {
3238 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
3239 }
3240
3241 alu.src[1].sel = tmp0;
3242 alu.src[1].chan = 2;
3243
3244 alu.last = (j == 3);
3245 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3246 return r;
3247 }
3248 } else {
3249 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3250 alu.op = ALU_OP2_MULLO_UINT;
3251
3252 alu.dst.sel = tmp0;
3253 alu.dst.chan = 1;
3254 alu.dst.write = 1;
3255
3256 if (signed_op) {
3257 alu.src[0].sel = tmp2;
3258 alu.src[0].chan = 1;
3259 } else {
3260 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
3261 }
3262
3263 alu.src[1].sel = tmp0;
3264 alu.src[1].chan = 2;
3265
3266 alu.last = 1;
3267 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3268 return r;
3269 }
3270
3271 /* 12. tmp0.w = src1 - tmp0.y = r */
3272 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3273 alu.op = ALU_OP2_SUB_INT;
3274
3275 alu.dst.sel = tmp0;
3276 alu.dst.chan = 3;
3277 alu.dst.write = 1;
3278
3279 if (signed_op) {
3280 alu.src[0].sel = tmp2;
3281 alu.src[0].chan = 0;
3282 } else {
3283 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3284 }
3285
3286 alu.src[1].sel = tmp0;
3287 alu.src[1].chan = 1;
3288
3289 alu.last = 1;
3290 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3291 return r;
3292
3293 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
3294 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3295 alu.op = ALU_OP2_SETGE_UINT;
3296
3297 alu.dst.sel = tmp1;
3298 alu.dst.chan = 0;
3299 alu.dst.write = 1;
3300
3301 alu.src[0].sel = tmp0;
3302 alu.src[0].chan = 3;
3303 if (signed_op) {
3304 alu.src[1].sel = tmp2;
3305 alu.src[1].chan = 1;
3306 } else {
3307 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
3308 }
3309
3310 alu.last = 1;
3311 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3312 return r;
3313
3314 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
3315 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3316 alu.op = ALU_OP2_SETGE_UINT;
3317
3318 alu.dst.sel = tmp1;
3319 alu.dst.chan = 1;
3320 alu.dst.write = 1;
3321
3322 if (signed_op) {
3323 alu.src[0].sel = tmp2;
3324 alu.src[0].chan = 0;
3325 } else {
3326 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3327 }
3328
3329 alu.src[1].sel = tmp0;
3330 alu.src[1].chan = 1;
3331
3332 alu.last = 1;
3333 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3334 return r;
3335
3336 if (mod) { /* UMOD */
3337
3338 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
3339 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3340 alu.op = ALU_OP2_SUB_INT;
3341
3342 alu.dst.sel = tmp1;
3343 alu.dst.chan = 2;
3344 alu.dst.write = 1;
3345
3346 alu.src[0].sel = tmp0;
3347 alu.src[0].chan = 3;
3348
3349 if (signed_op) {
3350 alu.src[1].sel = tmp2;
3351 alu.src[1].chan = 1;
3352 } else {
3353 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
3354 }
3355
3356 alu.last = 1;
3357 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3358 return r;
3359
3360 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
3361 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3362 alu.op = ALU_OP2_ADD_INT;
3363
3364 alu.dst.sel = tmp1;
3365 alu.dst.chan = 3;
3366 alu.dst.write = 1;
3367
3368 alu.src[0].sel = tmp0;
3369 alu.src[0].chan = 3;
3370 if (signed_op) {
3371 alu.src[1].sel = tmp2;
3372 alu.src[1].chan = 1;
3373 } else {
3374 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
3375 }
3376
3377 alu.last = 1;
3378 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3379 return r;
3380
3381 } else { /* UDIV */
3382
3383 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
3384 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3385 alu.op = ALU_OP2_ADD_INT;
3386
3387 alu.dst.sel = tmp1;
3388 alu.dst.chan = 2;
3389 alu.dst.write = 1;
3390
3391 alu.src[0].sel = tmp0;
3392 alu.src[0].chan = 2;
3393 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
3394
3395 alu.last = 1;
3396 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3397 return r;
3398
3399 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
3400 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3401 alu.op = ALU_OP2_ADD_INT;
3402
3403 alu.dst.sel = tmp1;
3404 alu.dst.chan = 3;
3405 alu.dst.write = 1;
3406
3407 alu.src[0].sel = tmp0;
3408 alu.src[0].chan = 2;
3409 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
3410
3411 alu.last = 1;
3412 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3413 return r;
3414
3415 }
3416
3417 /* 17. tmp1.x = tmp1.x & tmp1.y */
3418 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3419 alu.op = ALU_OP2_AND_INT;
3420
3421 alu.dst.sel = tmp1;
3422 alu.dst.chan = 0;
3423 alu.dst.write = 1;
3424
3425 alu.src[0].sel = tmp1;
3426 alu.src[0].chan = 0;
3427 alu.src[1].sel = tmp1;
3428 alu.src[1].chan = 1;
3429
3430 alu.last = 1;
3431 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3432 return r;
3433
3434 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
3435 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
3436 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3437 alu.op = ALU_OP3_CNDE_INT;
3438 alu.is_op3 = 1;
3439
3440 alu.dst.sel = tmp0;
3441 alu.dst.chan = 2;
3442 alu.dst.write = 1;
3443
3444 alu.src[0].sel = tmp1;
3445 alu.src[0].chan = 0;
3446 alu.src[1].sel = tmp0;
3447 alu.src[1].chan = mod ? 3 : 2;
3448 alu.src[2].sel = tmp1;
3449 alu.src[2].chan = 2;
3450
3451 alu.last = 1;
3452 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3453 return r;
3454
3455 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
3456 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3457 alu.op = ALU_OP3_CNDE_INT;
3458 alu.is_op3 = 1;
3459
3460 if (signed_op) {
3461 alu.dst.sel = tmp0;
3462 alu.dst.chan = 2;
3463 alu.dst.write = 1;
3464 } else {
3465 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3466 }
3467
3468 alu.src[0].sel = tmp1;
3469 alu.src[0].chan = 1;
3470 alu.src[1].sel = tmp1;
3471 alu.src[1].chan = 3;
3472 alu.src[2].sel = tmp0;
3473 alu.src[2].chan = 2;
3474
3475 alu.last = 1;
3476 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3477 return r;
3478
3479 if (signed_op) {
3480
3481 /* fix the sign of the result */
3482
3483 if (mod) {
3484
3485 /* tmp0.x = -tmp0.z */
3486 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3487 alu.op = ALU_OP2_SUB_INT;
3488
3489 alu.dst.sel = tmp0;
3490 alu.dst.chan = 0;
3491 alu.dst.write = 1;
3492
3493 alu.src[0].sel = V_SQ_ALU_SRC_0;
3494 alu.src[1].sel = tmp0;
3495 alu.src[1].chan = 2;
3496
3497 alu.last = 1;
3498 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3499 return r;
3500
3501 /* sign of the remainder is the same as the sign of src0 */
3502 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
3503 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3504 alu.op = ALU_OP3_CNDGE_INT;
3505 alu.is_op3 = 1;
3506
3507 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3508
3509 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3510 alu.src[1].sel = tmp0;
3511 alu.src[1].chan = 2;
3512 alu.src[2].sel = tmp0;
3513 alu.src[2].chan = 0;
3514
3515 alu.last = 1;
3516 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3517 return r;
3518
3519 } else {
3520
3521 /* tmp0.x = -tmp0.z */
3522 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3523 alu.op = ALU_OP2_SUB_INT;
3524
3525 alu.dst.sel = tmp0;
3526 alu.dst.chan = 0;
3527 alu.dst.write = 1;
3528
3529 alu.src[0].sel = V_SQ_ALU_SRC_0;
3530 alu.src[1].sel = tmp0;
3531 alu.src[1].chan = 2;
3532
3533 alu.last = 1;
3534 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3535 return r;
3536
3537 /* fix the quotient sign (same as the sign of src0*src1) */
3538 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
3539 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3540 alu.op = ALU_OP3_CNDGE_INT;
3541 alu.is_op3 = 1;
3542
3543 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3544
3545 alu.src[0].sel = tmp2;
3546 alu.src[0].chan = 2;
3547 alu.src[1].sel = tmp0;
3548 alu.src[1].chan = 2;
3549 alu.src[2].sel = tmp0;
3550 alu.src[2].chan = 0;
3551
3552 alu.last = 1;
3553 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
3554 return r;
3555 }
3556 }
3557 }
3558 return 0;
3559 }
3560
/* TGSI UDIV: unsigned integer division.
 * Thin wrapper over the shared emitter: tgsi_divmod(ctx, mod=0, signed_op=0). */
static int tgsi_udiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 0);
}
3565
/* TGSI UMOD: unsigned integer remainder.
 * Thin wrapper over the shared emitter: tgsi_divmod(ctx, mod=1, signed_op=0). */
static int tgsi_umod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 0);
}
3570
/* TGSI IDIV: signed integer division.
 * Thin wrapper over the shared emitter: tgsi_divmod(ctx, mod=0, signed_op=1). */
static int tgsi_idiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 1);
}
3575
/* TGSI IMOD: signed integer remainder.
 * Thin wrapper over the shared emitter: tgsi_divmod(ctx, mod=1, signed_op=1). */
static int tgsi_imod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 1);
}
3580
3581
3582 static int tgsi_f2i(struct r600_shader_ctx *ctx)
3583 {
3584 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3585 struct r600_bytecode_alu alu;
3586 int i, r;
3587 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3588 int last_inst = tgsi_last_instruction(write_mask);
3589
3590 for (i = 0; i < 4; i++) {
3591 if (!(write_mask & (1<<i)))
3592 continue;
3593
3594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3595 alu.op = ALU_OP1_TRUNC;
3596
3597 alu.dst.sel = ctx->temp_reg;
3598 alu.dst.chan = i;
3599 alu.dst.write = 1;
3600
3601 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3602 if (i == last_inst)
3603 alu.last = 1;
3604 r = r600_bytecode_add_alu(ctx->bc, &alu);
3605 if (r)
3606 return r;
3607 }
3608
3609 for (i = 0; i < 4; i++) {
3610 if (!(write_mask & (1<<i)))
3611 continue;
3612
3613 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3614 alu.op = ctx->inst_info->op;
3615
3616 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3617
3618 alu.src[0].sel = ctx->temp_reg;
3619 alu.src[0].chan = i;
3620
3621 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
3622 alu.last = 1;
3623 r = r600_bytecode_add_alu(ctx->bc, &alu);
3624 if (r)
3625 return r;
3626 }
3627
3628 return 0;
3629 }
3630
3631 static int tgsi_iabs(struct r600_shader_ctx *ctx)
3632 {
3633 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3634 struct r600_bytecode_alu alu;
3635 int i, r;
3636 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3637 int last_inst = tgsi_last_instruction(write_mask);
3638
3639 /* tmp = -src */
3640 for (i = 0; i < 4; i++) {
3641 if (!(write_mask & (1<<i)))
3642 continue;
3643
3644 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3645 alu.op = ALU_OP2_SUB_INT;
3646
3647 alu.dst.sel = ctx->temp_reg;
3648 alu.dst.chan = i;
3649 alu.dst.write = 1;
3650
3651 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3652 alu.src[0].sel = V_SQ_ALU_SRC_0;
3653
3654 if (i == last_inst)
3655 alu.last = 1;
3656 r = r600_bytecode_add_alu(ctx->bc, &alu);
3657 if (r)
3658 return r;
3659 }
3660
3661 /* dst = (src >= 0 ? src : tmp) */
3662 for (i = 0; i < 4; i++) {
3663 if (!(write_mask & (1<<i)))
3664 continue;
3665
3666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3667 alu.op = ALU_OP3_CNDGE_INT;
3668 alu.is_op3 = 1;
3669 alu.dst.write = 1;
3670
3671 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3672
3673 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3674 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3675 alu.src[2].sel = ctx->temp_reg;
3676 alu.src[2].chan = i;
3677
3678 if (i == last_inst)
3679 alu.last = 1;
3680 r = r600_bytecode_add_alu(ctx->bc, &alu);
3681 if (r)
3682 return r;
3683 }
3684 return 0;
3685 }
3686
3687 static int tgsi_issg(struct r600_shader_ctx *ctx)
3688 {
3689 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3690 struct r600_bytecode_alu alu;
3691 int i, r;
3692 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3693 int last_inst = tgsi_last_instruction(write_mask);
3694
3695 /* tmp = (src >= 0 ? src : -1) */
3696 for (i = 0; i < 4; i++) {
3697 if (!(write_mask & (1<<i)))
3698 continue;
3699
3700 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3701 alu.op = ALU_OP3_CNDGE_INT;
3702 alu.is_op3 = 1;
3703
3704 alu.dst.sel = ctx->temp_reg;
3705 alu.dst.chan = i;
3706 alu.dst.write = 1;
3707
3708 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3709 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3710 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
3711
3712 if (i == last_inst)
3713 alu.last = 1;
3714 r = r600_bytecode_add_alu(ctx->bc, &alu);
3715 if (r)
3716 return r;
3717 }
3718
3719 /* dst = (tmp > 0 ? 1 : tmp) */
3720 for (i = 0; i < 4; i++) {
3721 if (!(write_mask & (1<<i)))
3722 continue;
3723
3724 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3725 alu.op = ALU_OP3_CNDGT_INT;
3726 alu.is_op3 = 1;
3727 alu.dst.write = 1;
3728
3729 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3730
3731 alu.src[0].sel = ctx->temp_reg;
3732 alu.src[0].chan = i;
3733
3734 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
3735
3736 alu.src[2].sel = ctx->temp_reg;
3737 alu.src[2].chan = i;
3738
3739 if (i == last_inst)
3740 alu.last = 1;
3741 r = r600_bytecode_add_alu(ctx->bc, &alu);
3742 if (r)
3743 return r;
3744 }
3745 return 0;
3746 }
3747
3748
3749
/* SSG: float sign function, computed with two conditional moves:
 *   temp = (src > 0) ? 1.0 : src
 *   dst  = (-temp > 0) ? -1.0 : temp
 * (op3 instructions always write, so dst.write is not set explicitly.)
 */
static int tgsi_ssg(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, r;

	/* tmp = (src > 0 ? 1 : src) */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP3_CNDGT;
		alu.is_op3 = 1;

		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;

		r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
		alu.src[1].sel = V_SQ_ALU_SRC_1;
		r600_bytecode_src(&alu.src[2], &ctx->src[0], i);

		if (i == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* dst = (-tmp > 0 ? -1 : tmp) */
	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP3_CNDGT;
		alu.is_op3 = 1;
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		alu.src[0].sel = ctx->temp_reg;
		alu.src[0].chan = i;
		alu.src[0].neg = 1;	/* compare against -tmp */

		alu.src[1].sel = V_SQ_ALU_SRC_1;
		alu.src[1].neg = 1;	/* select constant -1.0 */

		alu.src[2].sel = ctx->temp_reg;
		alu.src[2].chan = i;

		if (i == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
3801
3802 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
3803 {
3804 struct r600_bytecode_alu alu;
3805 int i, r;
3806
3807 for (i = 0; i < 4; i++) {
3808 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3809 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
3810 alu.op = ALU_OP0_NOP;
3811 alu.dst.chan = i;
3812 } else {
3813 alu.op = ALU_OP1_MOV;
3814 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3815 alu.src[0].sel = ctx->temp_reg;
3816 alu.src[0].chan = i;
3817 }
3818 if (i == 3) {
3819 alu.last = 1;
3820 }
3821 r = r600_bytecode_add_alu(ctx->bc, &alu);
3822 if (r)
3823 return r;
3824 }
3825 return 0;
3826 }
3827
3828 static int tgsi_op3(struct r600_shader_ctx *ctx)
3829 {
3830 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3831 struct r600_bytecode_alu alu;
3832 int i, j, r;
3833 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3834
3835 for (i = 0; i < lasti + 1; i++) {
3836 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3837 continue;
3838
3839 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3840 alu.op = ctx->inst_info->op;
3841 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3842 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
3843 }
3844
3845 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3846 alu.dst.chan = i;
3847 alu.dst.write = 1;
3848 alu.is_op3 = 1;
3849 if (i == lasti) {
3850 alu.last = 1;
3851 }
3852 r = r600_bytecode_add_alu(ctx->bc, &alu);
3853 if (r)
3854 return r;
3855 }
3856 return 0;
3857 }
3858
/* Emit a dot-product opcode (DP2/DP3/DP4/DPH).  The hardware op
 * consumes all four channels, so for the narrower TGSI variants the
 * unused channel terms are patched to contribute 0 (or 1.0 * src1.w
 * for DPH's homogeneous term). */
static int tgsi_dp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
		}

		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.dst.chan = i;
		/* result is per-group; only write channels in the mask */
		alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
		/* handle some special cases */
		switch (ctx->inst_info->tgsi_opcode) {
		case TGSI_OPCODE_DP2:
			/* zero the z/w terms so they don't affect the sum */
			if (i > 1) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DP3:
			/* zero the w term */
			if (i > 2) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DPH:
			/* w term becomes 1.0 * src1.w */
			if (i == 3) {
				alu.src[0].sel = V_SQ_ALU_SRC_1;
				alu.src[0].chan = 0;
				alu.src[0].neg = 0;
			}
			break;
		default:
			break;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
3908
3909 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
3910 unsigned index)
3911 {
3912 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3913 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
3914 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
3915 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
3916 ctx->src[index].neg || ctx->src[index].abs;
3917 }
3918
/* Translate a TGSI source register to its absolute GPR index by adding
 * the per-register-file GPR offset to the register index. */
static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
					    unsigned index)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
}
3925
3926 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
3927 {
3928 struct r600_bytecode_vtx vtx;
3929 struct r600_bytecode_alu alu;
3930 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3931 int src_gpr, r, i;
3932 int id = tgsi_tex_get_src_gpr(ctx, 1);
3933
3934 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
3935 if (src_requires_loading) {
3936 for (i = 0; i < 4; i++) {
3937 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3938 alu.op = ALU_OP1_MOV;
3939 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3940 alu.dst.sel = ctx->temp_reg;
3941 alu.dst.chan = i;
3942 if (i == 3)
3943 alu.last = 1;
3944 alu.dst.write = 1;
3945 r = r600_bytecode_add_alu(ctx->bc, &alu);
3946 if (r)
3947 return r;
3948 }
3949 src_gpr = ctx->temp_reg;
3950 }
3951
3952 memset(&vtx, 0, sizeof(vtx));
3953 vtx.op = FETCH_OP_VFETCH;
3954 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
3955 vtx.fetch_type = 2; /* VTX_FETCH_NO_INDEX_OFFSET */
3956 vtx.src_gpr = src_gpr;
3957 vtx.mega_fetch_count = 16;
3958 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
3959 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
3960 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
3961 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
3962 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
3963 vtx.use_const_fields = 1;
3964 vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
3965
3966 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
3967 return r;
3968
3969 if (ctx->bc->chip_class >= EVERGREEN)
3970 return 0;
3971
3972 for (i = 0; i < 4; i++) {
3973 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3974 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3975 continue;
3976
3977 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3978 alu.op = ALU_OP2_AND_INT;
3979
3980 alu.dst.chan = i;
3981 alu.dst.sel = vtx.dst_gpr;
3982 alu.dst.write = 1;
3983
3984 alu.src[0].sel = vtx.dst_gpr;
3985 alu.src[0].chan = i;
3986
3987 alu.src[1].sel = 512 + (id * 2);
3988 alu.src[1].chan = i % 4;
3989 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
3990
3991 if (i == lasti)
3992 alu.last = 1;
3993 r = r600_bytecode_add_alu(ctx->bc, &alu);
3994 if (r)
3995 return r;
3996 }
3997
3998 if (inst->Dst[0].Register.WriteMask & 3) {
3999 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4000 alu.op = ALU_OP2_OR_INT;
4001
4002 alu.dst.chan = 3;
4003 alu.dst.sel = vtx.dst_gpr;
4004 alu.dst.write = 1;
4005
4006 alu.src[0].sel = vtx.dst_gpr;
4007 alu.src[0].chan = 3;
4008
4009 alu.src[1].sel = 512 + (id * 2) + 1;
4010 alu.src[1].chan = 0;
4011 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
4012
4013 alu.last = 1;
4014 r = r600_bytecode_add_alu(ctx->bc, &alu);
4015 if (r)
4016 return r;
4017 }
4018 return 0;
4019 }
4020
/* TXQ on a buffer texture: the size is not available via the texture
 * path, so MOV it into the destination from the buffer-info constant
 * buffer instead. */
static int r600_do_buffer_txq(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r;
	int id = tgsi_tex_get_src_gpr(ctx, 1);	/* resource index */

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;

	if (ctx->bc->chip_class >= EVERGREEN) {
		/* evergreen: one dword per buffer, four buffers per constant */
		alu.src[0].sel = 512 + (id / 4);
		alu.src[0].chan = id % 4;
	} else {
		/* r600: second channel (y) of the buffer's second dword */
		alu.src[0].sel = 512 + (id * 2) + 1;
		alu.src[0].chan = 1;
	}
	alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;
	return 0;
}
4047
4048 static int tgsi_tex(struct r600_shader_ctx *ctx)
4049 {
4050 static float one_point_five = 1.5f;
4051 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4052 struct r600_bytecode_tex tex;
4053 struct r600_bytecode_alu alu;
4054 unsigned src_gpr;
4055 int r, i, j;
4056 int opcode;
4057 bool read_compressed_msaa = ctx->bc->msaa_texture_mode == MSAA_TEXTURE_COMPRESSED &&
4058 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
4059 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
4060 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
4061 /* Texture fetch instructions can only use gprs as source.
4062 * Also they cannot negate the source or take the absolute value */
4063 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
4064 tgsi_tex_src_requires_loading(ctx, 0)) ||
4065 read_compressed_msaa;
4066 boolean src_loaded = FALSE;
4067 unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
4068 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
4069 boolean has_txq_cube_array_z = false;
4070
4071 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
4072 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
4073 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
4074 if (inst->Dst[0].Register.WriteMask & 4) {
4075 ctx->shader->has_txq_cube_array_z_comp = true;
4076 has_txq_cube_array_z = true;
4077 }
4078
4079 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
4080 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
4081 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
4082 sampler_src_reg = 2;
4083
4084 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
4085
4086 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
4087 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
4088 ctx->shader->uses_tex_buffers = true;
4089 return r600_do_buffer_txq(ctx);
4090 }
4091 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
4092 if (ctx->bc->chip_class < EVERGREEN)
4093 ctx->shader->uses_tex_buffers = true;
4094 return do_vtx_fetch_inst(ctx, src_requires_loading);
4095 }
4096 }
4097
4098 if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
4099 /* get offset values */
4100 if (inst->Texture.NumOffsets) {
4101 assert(inst->Texture.NumOffsets == 1);
4102
4103 offset_x = ctx->literals[inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
4104 offset_y = ctx->literals[inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
4105 offset_z = ctx->literals[inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
4106 }
4107 } else if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
4108 /* TGSI moves the sampler to src reg 3 for TXD */
4109 sampler_src_reg = 3;
4110
4111 for (i = 1; i < 3; i++) {
4112 /* set gradients h/v */
4113 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
4114 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
4115 FETCH_OP_SET_GRADIENTS_V;
4116 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
4117 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
4118
4119 if (tgsi_tex_src_requires_loading(ctx, i)) {
4120 tex.src_gpr = r600_get_temp(ctx);
4121 tex.src_sel_x = 0;
4122 tex.src_sel_y = 1;
4123 tex.src_sel_z = 2;
4124 tex.src_sel_w = 3;
4125
4126 for (j = 0; j < 4; j++) {
4127 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4128 alu.op = ALU_OP1_MOV;
4129 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
4130 alu.dst.sel = tex.src_gpr;
4131 alu.dst.chan = j;
4132 if (j == 3)
4133 alu.last = 1;
4134 alu.dst.write = 1;
4135 r = r600_bytecode_add_alu(ctx->bc, &alu);
4136 if (r)
4137 return r;
4138 }
4139
4140 } else {
4141 tex.src_gpr = tgsi_tex_get_src_gpr(ctx, i);
4142 tex.src_sel_x = ctx->src[i].swizzle[0];
4143 tex.src_sel_y = ctx->src[i].swizzle[1];
4144 tex.src_sel_z = ctx->src[i].swizzle[2];
4145 tex.src_sel_w = ctx->src[i].swizzle[3];
4146 tex.src_rel = ctx->src[i].rel;
4147 }
4148 tex.dst_gpr = ctx->temp_reg; /* just to avoid confusing the asm scheduler */
4149 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
4150 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
4151 tex.coord_type_x = 1;
4152 tex.coord_type_y = 1;
4153 tex.coord_type_z = 1;
4154 tex.coord_type_w = 1;
4155 }
4156 r = r600_bytecode_add_tex(ctx->bc, &tex);
4157 if (r)
4158 return r;
4159 }
4160 } else if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
4161 int out_chan;
4162 /* Add perspective divide */
4163 if (ctx->bc->chip_class == CAYMAN) {
4164 out_chan = 2;
4165 for (i = 0; i < 3; i++) {
4166 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4167 alu.op = ALU_OP1_RECIP_IEEE;
4168 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4169
4170 alu.dst.sel = ctx->temp_reg;
4171 alu.dst.chan = i;
4172 if (i == 2)
4173 alu.last = 1;
4174 if (out_chan == i)
4175 alu.dst.write = 1;
4176 r = r600_bytecode_add_alu(ctx->bc, &alu);
4177 if (r)
4178 return r;
4179 }
4180
4181 } else {
4182 out_chan = 3;
4183 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4184 alu.op = ALU_OP1_RECIP_IEEE;
4185 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4186
4187 alu.dst.sel = ctx->temp_reg;
4188 alu.dst.chan = out_chan;
4189 alu.last = 1;
4190 alu.dst.write = 1;
4191 r = r600_bytecode_add_alu(ctx->bc, &alu);
4192 if (r)
4193 return r;
4194 }
4195
4196 for (i = 0; i < 3; i++) {
4197 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4198 alu.op = ALU_OP2_MUL;
4199 alu.src[0].sel = ctx->temp_reg;
4200 alu.src[0].chan = out_chan;
4201 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4202 alu.dst.sel = ctx->temp_reg;
4203 alu.dst.chan = i;
4204 alu.dst.write = 1;
4205 r = r600_bytecode_add_alu(ctx->bc, &alu);
4206 if (r)
4207 return r;
4208 }
4209 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4210 alu.op = ALU_OP1_MOV;
4211 alu.src[0].sel = V_SQ_ALU_SRC_1;
4212 alu.src[0].chan = 0;
4213 alu.dst.sel = ctx->temp_reg;
4214 alu.dst.chan = 3;
4215 alu.last = 1;
4216 alu.dst.write = 1;
4217 r = r600_bytecode_add_alu(ctx->bc, &alu);
4218 if (r)
4219 return r;
4220 src_loaded = TRUE;
4221 src_gpr = ctx->temp_reg;
4222 }
4223
4224 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
4225 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
4226 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
4227 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
4228 inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
4229 inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
4230
4231 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
4232 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
4233
4234 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
4235 for (i = 0; i < 4; i++) {
4236 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4237 alu.op = ALU_OP2_CUBE;
4238 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
4239 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
4240 alu.dst.sel = ctx->temp_reg;
4241 alu.dst.chan = i;
4242 if (i == 3)
4243 alu.last = 1;
4244 alu.dst.write = 1;
4245 r = r600_bytecode_add_alu(ctx->bc, &alu);
4246 if (r)
4247 return r;
4248 }
4249
4250 /* tmp1.z = RCP_e(|tmp1.z|) */
4251 if (ctx->bc->chip_class == CAYMAN) {
4252 for (i = 0; i < 3; i++) {
4253 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4254 alu.op = ALU_OP1_RECIP_IEEE;
4255 alu.src[0].sel = ctx->temp_reg;
4256 alu.src[0].chan = 2;
4257 alu.src[0].abs = 1;
4258 alu.dst.sel = ctx->temp_reg;
4259 alu.dst.chan = i;
4260 if (i == 2)
4261 alu.dst.write = 1;
4262 if (i == 2)
4263 alu.last = 1;
4264 r = r600_bytecode_add_alu(ctx->bc, &alu);
4265 if (r)
4266 return r;
4267 }
4268 } else {
4269 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4270 alu.op = ALU_OP1_RECIP_IEEE;
4271 alu.src[0].sel = ctx->temp_reg;
4272 alu.src[0].chan = 2;
4273 alu.src[0].abs = 1;
4274 alu.dst.sel = ctx->temp_reg;
4275 alu.dst.chan = 2;
4276 alu.dst.write = 1;
4277 alu.last = 1;
4278 r = r600_bytecode_add_alu(ctx->bc, &alu);
4279 if (r)
4280 return r;
4281 }
4282
4283 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
4284 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
4285 * muladd has no writemask, have to use another temp
4286 */
4287 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4288 alu.op = ALU_OP3_MULADD;
4289 alu.is_op3 = 1;
4290
4291 alu.src[0].sel = ctx->temp_reg;
4292 alu.src[0].chan = 0;
4293 alu.src[1].sel = ctx->temp_reg;
4294 alu.src[1].chan = 2;
4295
4296 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
4297 alu.src[2].chan = 0;
4298 alu.src[2].value = *(uint32_t *)&one_point_five;
4299
4300 alu.dst.sel = ctx->temp_reg;
4301 alu.dst.chan = 0;
4302 alu.dst.write = 1;
4303
4304 r = r600_bytecode_add_alu(ctx->bc, &alu);
4305 if (r)
4306 return r;
4307
4308 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4309 alu.op = ALU_OP3_MULADD;
4310 alu.is_op3 = 1;
4311
4312 alu.src[0].sel = ctx->temp_reg;
4313 alu.src[0].chan = 1;
4314 alu.src[1].sel = ctx->temp_reg;
4315 alu.src[1].chan = 2;
4316
4317 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
4318 alu.src[2].chan = 0;
4319 alu.src[2].value = *(uint32_t *)&one_point_five;
4320
4321 alu.dst.sel = ctx->temp_reg;
4322 alu.dst.chan = 1;
4323 alu.dst.write = 1;
4324
4325 alu.last = 1;
4326 r = r600_bytecode_add_alu(ctx->bc, &alu);
4327 if (r)
4328 return r;
4329 /* write initial compare value into Z component
4330 - W src 0 for shadow cube
4331 - X src 1 for shadow cube array */
4332 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
4333 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4334 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4335 alu.op = ALU_OP1_MOV;
4336 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
4337 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
4338 else
4339 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4340 alu.dst.sel = ctx->temp_reg;
4341 alu.dst.chan = 2;
4342 alu.dst.write = 1;
4343 alu.last = 1;
4344 r = r600_bytecode_add_alu(ctx->bc, &alu);
4345 if (r)
4346 return r;
4347 }
4348
4349 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
4350 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4351 if (ctx->bc->chip_class >= EVERGREEN) {
4352 int mytmp = r600_get_temp(ctx);
4353 static const float eight = 8.0f;
4354 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4355 alu.op = ALU_OP1_MOV;
4356 alu.src[0].sel = ctx->temp_reg;
4357 alu.src[0].chan = 3;
4358 alu.dst.sel = mytmp;
4359 alu.dst.chan = 0;
4360 alu.dst.write = 1;
4361 alu.last = 1;
4362 r = r600_bytecode_add_alu(ctx->bc, &alu);
4363 if (r)
4364 return r;
4365
4366 /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
4367 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4368 alu.op = ALU_OP3_MULADD;
4369 alu.is_op3 = 1;
4370 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4371 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4372 alu.src[1].chan = 0;
4373 alu.src[1].value = *(uint32_t *)&eight;
4374 alu.src[2].sel = mytmp;
4375 alu.src[2].chan = 0;
4376 alu.dst.sel = ctx->temp_reg;
4377 alu.dst.chan = 3;
4378 alu.dst.write = 1;
4379 alu.last = 1;
4380 r = r600_bytecode_add_alu(ctx->bc, &alu);
4381 if (r)
4382 return r;
4383 } else if (ctx->bc->chip_class < EVERGREEN) {
4384 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
4385 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
4386 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
4387 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
4388 tex.src_gpr = r600_get_temp(ctx);
4389 tex.src_sel_x = 0;
4390 tex.src_sel_y = 0;
4391 tex.src_sel_z = 0;
4392 tex.src_sel_w = 0;
4393 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
4394 tex.coord_type_x = 1;
4395 tex.coord_type_y = 1;
4396 tex.coord_type_z = 1;
4397 tex.coord_type_w = 1;
4398 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4399 alu.op = ALU_OP1_MOV;
4400 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4401 alu.dst.sel = tex.src_gpr;
4402 alu.dst.chan = 0;
4403 alu.last = 1;
4404 alu.dst.write = 1;
4405 r = r600_bytecode_add_alu(ctx->bc, &alu);
4406 if (r)
4407 return r;
4408
4409 r = r600_bytecode_add_tex(ctx->bc, &tex);
4410 if (r)
4411 return r;
4412 }
4413
4414 }
4415
4416 /* for cube forms of lod and bias we need to route things */
4417 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
4418 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
4419 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
4420 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
4421 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4422 alu.op = ALU_OP1_MOV;
4423 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
4424 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
4425 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
4426 else
4427 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
4428 alu.dst.sel = ctx->temp_reg;
4429 alu.dst.chan = 2;
4430 alu.last = 1;
4431 alu.dst.write = 1;
4432 r = r600_bytecode_add_alu(ctx->bc, &alu);
4433 if (r)
4434 return r;
4435 }
4436
4437 src_loaded = TRUE;
4438 src_gpr = ctx->temp_reg;
4439 }
4440
4441 if (src_requires_loading && !src_loaded) {
4442 for (i = 0; i < 4; i++) {
4443 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4444 alu.op = ALU_OP1_MOV;
4445 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4446 alu.dst.sel = ctx->temp_reg;
4447 alu.dst.chan = i;
4448 if (i == 3)
4449 alu.last = 1;
4450 alu.dst.write = 1;
4451 r = r600_bytecode_add_alu(ctx->bc, &alu);
4452 if (r)
4453 return r;
4454 }
4455 src_loaded = TRUE;
4456 src_gpr = ctx->temp_reg;
4457 }
4458
4459 /* Obtain the sample index for reading a compressed MSAA color texture.
4460 * To read the FMASK, we use the ldfptr instruction, which tells us
4461 * where the samples are stored.
4462 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
4463 * which is the identity mapping. Each nibble says which physical sample
4464 * should be fetched to get that sample.
4465 *
4466 * Assume src.z contains the sample index. It should be modified like this:
4467 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
4468 * Then fetch the texel with src.
4469 */
4470 if (read_compressed_msaa) {
4471 unsigned sample_chan = inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ? 3 : 4;
4472 unsigned temp = r600_get_temp(ctx);
4473 assert(src_loaded);
4474
4475 /* temp.w = ldfptr() */
4476 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
4477 tex.op = FETCH_OP_LD;
4478 tex.inst_mod = 1; /* to indicate this is ldfptr */
4479 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
4480 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
4481 tex.src_gpr = src_gpr;
4482 tex.dst_gpr = temp;
4483 tex.dst_sel_x = 7; /* mask out these components */
4484 tex.dst_sel_y = 7;
4485 tex.dst_sel_z = 7;
4486 tex.dst_sel_w = 0; /* store X */
4487 tex.src_sel_x = 0;
4488 tex.src_sel_y = 1;
4489 tex.src_sel_z = 2;
4490 tex.src_sel_w = 3;
4491 tex.offset_x = offset_x;
4492 tex.offset_y = offset_y;
4493 tex.offset_z = offset_z;
4494 r = r600_bytecode_add_tex(ctx->bc, &tex);
4495 if (r)
4496 return r;
4497
4498 /* temp.x = sample_index*4 */
4499 if (ctx->bc->chip_class == CAYMAN) {
4500 for (i = 0 ; i < 4; i++) {
4501 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4502 alu.op = ctx->inst_info->op;
4503 alu.src[0].sel = src_gpr;
4504 alu.src[0].chan = sample_chan;
4505 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4506 alu.src[1].value = 4;
4507 alu.dst.sel = temp;
4508 alu.dst.chan = i;
4509 alu.dst.write = i == 0;
4510 if (i == 3)
4511 alu.last = 1;
4512 r = r600_bytecode_add_alu(ctx->bc, &alu);
4513 if (r)
4514 return r;
4515 }
4516 } else {
4517 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4518 alu.op = ALU_OP2_MULLO_INT;
4519 alu.src[0].sel = src_gpr;
4520 alu.src[0].chan = sample_chan;
4521 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4522 alu.src[1].value = 4;
4523 alu.dst.sel = temp;
4524 alu.dst.chan = 0;
4525 alu.dst.write = 1;
4526 alu.last = 1;
4527 r = r600_bytecode_add_alu(ctx->bc, &alu);
4528 if (r)
4529 return r;
4530 }
4531
4532 /* sample_index = temp.w >> temp.x */
4533 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4534 alu.op = ALU_OP2_LSHR_INT;
4535 alu.src[0].sel = temp;
4536 alu.src[0].chan = 3;
4537 alu.src[1].sel = temp;
4538 alu.src[1].chan = 0;
4539 alu.dst.sel = src_gpr;
4540 alu.dst.chan = sample_chan;
4541 alu.dst.write = 1;
4542 alu.last = 1;
4543 r = r600_bytecode_add_alu(ctx->bc, &alu);
4544 if (r)
4545 return r;
4546
4547 /* sample_index & 0xF */
4548 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4549 alu.op = ALU_OP2_AND_INT;
4550 alu.src[0].sel = src_gpr;
4551 alu.src[0].chan = sample_chan;
4552 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4553 alu.src[1].value = 0xF;
4554 alu.dst.sel = src_gpr;
4555 alu.dst.chan = sample_chan;
4556 alu.dst.write = 1;
4557 alu.last = 1;
4558 r = r600_bytecode_add_alu(ctx->bc, &alu);
4559 if (r)
4560 return r;
4561 #if 0
4562 /* visualize the FMASK */
4563 for (i = 0; i < 4; i++) {
4564 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4565 alu.op = ALU_OP1_INT_TO_FLT;
4566 alu.src[0].sel = src_gpr;
4567 alu.src[0].chan = sample_chan;
4568 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
4569 alu.dst.chan = i;
4570 alu.dst.write = 1;
4571 alu.last = 1;
4572 r = r600_bytecode_add_alu(ctx->bc, &alu);
4573 if (r)
4574 return r;
4575 }
4576 return 0;
4577 #endif
4578 }
4579
4580 /* does this shader want a num layers from TXQ for a cube array? */
4581 if (has_txq_cube_array_z) {
4582 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
4583
4584 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4585 alu.op = ALU_OP1_MOV;
4586
4587 alu.src[0].sel = 512 + (id / 4);
4588 alu.src[0].kc_bank = R600_TXQ_CONST_BUFFER;
4589 alu.src[0].chan = id % 4;
4590 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
4591 alu.last = 1;
4592 r = r600_bytecode_add_alu(ctx->bc, &alu);
4593 if (r)
4594 return r;
4595 /* disable writemask from texture instruction */
4596 inst->Dst[0].Register.WriteMask &= ~4;
4597 }
4598
4599 opcode = ctx->inst_info->op;
4600 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
4601 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
4602 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
4603 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
4604 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
4605 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
4606 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4607 switch (opcode) {
4608 case FETCH_OP_SAMPLE:
4609 opcode = FETCH_OP_SAMPLE_C;
4610 break;
4611 case FETCH_OP_SAMPLE_L:
4612 opcode = FETCH_OP_SAMPLE_C_L;
4613 break;
4614 case FETCH_OP_SAMPLE_LB:
4615 opcode = FETCH_OP_SAMPLE_C_LB;
4616 break;
4617 case FETCH_OP_SAMPLE_G:
4618 opcode = FETCH_OP_SAMPLE_C_G;
4619 break;
4620 }
4621 }
4622
4623 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
4624 tex.op = opcode;
4625
4626 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
4627 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
4628 tex.src_gpr = src_gpr;
4629 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
4630 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
4631 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
4632 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
4633 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
4634
4635 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ) {
4636 tex.src_sel_x = 4;
4637 tex.src_sel_y = 4;
4638 tex.src_sel_z = 4;
4639 tex.src_sel_w = 4;
4640 } else if (src_loaded) {
4641 tex.src_sel_x = 0;
4642 tex.src_sel_y = 1;
4643 tex.src_sel_z = 2;
4644 tex.src_sel_w = 3;
4645 } else {
4646 tex.src_sel_x = ctx->src[0].swizzle[0];
4647 tex.src_sel_y = ctx->src[0].swizzle[1];
4648 tex.src_sel_z = ctx->src[0].swizzle[2];
4649 tex.src_sel_w = ctx->src[0].swizzle[3];
4650 tex.src_rel = ctx->src[0].rel;
4651 }
4652
4653 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
4654 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
4655 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
4656 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4657 tex.src_sel_x = 1;
4658 tex.src_sel_y = 0;
4659 tex.src_sel_z = 3;
4660 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
4661 }
4662
4663 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
4664 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
4665 tex.coord_type_x = 1;
4666 tex.coord_type_y = 1;
4667 }
4668 tex.coord_type_z = 1;
4669 tex.coord_type_w = 1;
4670
4671 tex.offset_x = offset_x;
4672 tex.offset_y = offset_y;
4673 tex.offset_z = offset_z;
4674
4675 /* Put the depth for comparison in W.
4676 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
4677 * Some instructions expect the depth in Z. */
4678 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
4679 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
4680 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
4681 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
4682 opcode != FETCH_OP_SAMPLE_C_L &&
4683 opcode != FETCH_OP_SAMPLE_C_LB) {
4684 tex.src_sel_w = tex.src_sel_z;
4685 }
4686
4687 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
4688 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
4689 if (opcode == FETCH_OP_SAMPLE_C_L ||
4690 opcode == FETCH_OP_SAMPLE_C_LB) {
4691 /* the array index is read from Y */
4692 tex.coord_type_y = 0;
4693 } else {
4694 /* the array index is read from Z */
4695 tex.coord_type_z = 0;
4696 tex.src_sel_z = tex.src_sel_y;
4697 }
4698 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
4699 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
4700 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
4701 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
4702 (ctx->bc->chip_class >= EVERGREEN)))
4703 /* the array index is read from Z */
4704 tex.coord_type_z = 0;
4705
4706 r = r600_bytecode_add_tex(ctx->bc, &tex);
4707 if (r)
4708 return r;
4709
4710 /* add shadow ambient support - gallium doesn't do it yet */
4711 return 0;
4712 }
4713
4714 static int tgsi_lrp(struct r600_shader_ctx *ctx)
4715 {
4716 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4717 struct r600_bytecode_alu alu;
4718 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4719 unsigned i;
4720 int r;
4721
4722 /* optimize if it's just an equal balance */
4723 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
4724 for (i = 0; i < lasti + 1; i++) {
4725 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4726 continue;
4727
4728 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4729 alu.op = ALU_OP2_ADD;
4730 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4731 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
4732 alu.omod = 3;
4733 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4734 alu.dst.chan = i;
4735 if (i == lasti) {
4736 alu.last = 1;
4737 }
4738 r = r600_bytecode_add_alu(ctx->bc, &alu);
4739 if (r)
4740 return r;
4741 }
4742 return 0;
4743 }
4744
4745 /* 1 - src0 */
4746 for (i = 0; i < lasti + 1; i++) {
4747 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4748 continue;
4749
4750 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4751 alu.op = ALU_OP2_ADD;
4752 alu.src[0].sel = V_SQ_ALU_SRC_1;
4753 alu.src[0].chan = 0;
4754 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4755 r600_bytecode_src_toggle_neg(&alu.src[1]);
4756 alu.dst.sel = ctx->temp_reg;
4757 alu.dst.chan = i;
4758 if (i == lasti) {
4759 alu.last = 1;
4760 }
4761 alu.dst.write = 1;
4762 r = r600_bytecode_add_alu(ctx->bc, &alu);
4763 if (r)
4764 return r;
4765 }
4766
4767 /* (1 - src0) * src2 */
4768 for (i = 0; i < lasti + 1; i++) {
4769 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4770 continue;
4771
4772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4773 alu.op = ALU_OP2_MUL;
4774 alu.src[0].sel = ctx->temp_reg;
4775 alu.src[0].chan = i;
4776 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
4777 alu.dst.sel = ctx->temp_reg;
4778 alu.dst.chan = i;
4779 if (i == lasti) {
4780 alu.last = 1;
4781 }
4782 alu.dst.write = 1;
4783 r = r600_bytecode_add_alu(ctx->bc, &alu);
4784 if (r)
4785 return r;
4786 }
4787
4788 /* src0 * src1 + (1 - src0) * src2 */
4789 for (i = 0; i < lasti + 1; i++) {
4790 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4791 continue;
4792
4793 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4794 alu.op = ALU_OP3_MULADD;
4795 alu.is_op3 = 1;
4796 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4797 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4798 alu.src[2].sel = ctx->temp_reg;
4799 alu.src[2].chan = i;
4800
4801 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4802 alu.dst.chan = i;
4803 if (i == lasti) {
4804 alu.last = 1;
4805 }
4806 r = r600_bytecode_add_alu(ctx->bc, &alu);
4807 if (r)
4808 return r;
4809 }
4810 return 0;
4811 }
4812
4813 static int tgsi_cmp(struct r600_shader_ctx *ctx)
4814 {
4815 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4816 struct r600_bytecode_alu alu;
4817 int i, r;
4818 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4819
4820 for (i = 0; i < lasti + 1; i++) {
4821 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4822 continue;
4823
4824 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4825 alu.op = ALU_OP3_CNDGE;
4826 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4827 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
4828 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
4829 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4830 alu.dst.chan = i;
4831 alu.dst.write = 1;
4832 alu.is_op3 = 1;
4833 if (i == lasti)
4834 alu.last = 1;
4835 r = r600_bytecode_add_alu(ctx->bc, &alu);
4836 if (r)
4837 return r;
4838 }
4839 return 0;
4840 }
4841
4842 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
4843 {
4844 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4845 struct r600_bytecode_alu alu;
4846 int i, r;
4847 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
4848
4849 for (i = 0; i < lasti + 1; i++) {
4850 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
4851 continue;
4852
4853 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4854 alu.op = ALU_OP3_CNDGE_INT;
4855 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4856 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
4857 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
4858 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4859 alu.dst.chan = i;
4860 alu.dst.write = 1;
4861 alu.is_op3 = 1;
4862 if (i == lasti)
4863 alu.last = 1;
4864 r = r600_bytecode_add_alu(ctx->bc, &alu);
4865 if (r)
4866 return r;
4867 }
4868 return 0;
4869 }
4870
4871 static int tgsi_xpd(struct r600_shader_ctx *ctx)
4872 {
4873 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4874 static const unsigned int src0_swizzle[] = {2, 0, 1};
4875 static const unsigned int src1_swizzle[] = {1, 2, 0};
4876 struct r600_bytecode_alu alu;
4877 uint32_t use_temp = 0;
4878 int i, r;
4879
4880 if (inst->Dst[0].Register.WriteMask != 0xf)
4881 use_temp = 1;
4882
4883 for (i = 0; i < 4; i++) {
4884 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4885 alu.op = ALU_OP2_MUL;
4886 if (i < 3) {
4887 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
4888 r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]);
4889 } else {
4890 alu.src[0].sel = V_SQ_ALU_SRC_0;
4891 alu.src[0].chan = i;
4892 alu.src[1].sel = V_SQ_ALU_SRC_0;
4893 alu.src[1].chan = i;
4894 }
4895
4896 alu.dst.sel = ctx->temp_reg;
4897 alu.dst.chan = i;
4898 alu.dst.write = 1;
4899
4900 if (i == 3)
4901 alu.last = 1;
4902 r = r600_bytecode_add_alu(ctx->bc, &alu);
4903 if (r)
4904 return r;
4905 }
4906
4907 for (i = 0; i < 4; i++) {
4908 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4909 alu.op = ALU_OP3_MULADD;
4910
4911 if (i < 3) {
4912 r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]);
4913 r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]);
4914 } else {
4915 alu.src[0].sel = V_SQ_ALU_SRC_0;
4916 alu.src[0].chan = i;
4917 alu.src[1].sel = V_SQ_ALU_SRC_0;
4918 alu.src[1].chan = i;
4919 }
4920
4921 alu.src[2].sel = ctx->temp_reg;
4922 alu.src[2].neg = 1;
4923 alu.src[2].chan = i;
4924
4925 if (use_temp)
4926 alu.dst.sel = ctx->temp_reg;
4927 else
4928 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4929 alu.dst.chan = i;
4930 alu.dst.write = 1;
4931 alu.is_op3 = 1;
4932 if (i == 3)
4933 alu.last = 1;
4934 r = r600_bytecode_add_alu(ctx->bc, &alu);
4935 if (r)
4936 return r;
4937 }
4938 if (use_temp)
4939 return tgsi_helper_copy(ctx, inst);
4940 return 0;
4941 }
4942
4943 static int tgsi_exp(struct r600_shader_ctx *ctx)
4944 {
4945 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4946 struct r600_bytecode_alu alu;
4947 int r;
4948 int i;
4949
4950 /* result.x = 2^floor(src); */
4951 if (inst->Dst[0].Register.WriteMask & 1) {
4952 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4953
4954 alu.op = ALU_OP1_FLOOR;
4955 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
4956
4957 alu.dst.sel = ctx->temp_reg;
4958 alu.dst.chan = 0;
4959 alu.dst.write = 1;
4960 alu.last = 1;
4961 r = r600_bytecode_add_alu(ctx->bc, &alu);
4962 if (r)
4963 return r;
4964
4965 if (ctx->bc->chip_class == CAYMAN) {
4966 for (i = 0; i < 3; i++) {
4967 alu.op = ALU_OP1_EXP_IEEE;
4968 alu.src[0].sel = ctx->temp_reg;
4969 alu.src[0].chan = 0;
4970
4971 alu.dst.sel = ctx->temp_reg;
4972 alu.dst.chan = i;
4973 alu.dst.write = i == 0;
4974 alu.last = i == 2;
4975 r = r600_bytecode_add_alu(ctx->bc, &alu);
4976 if (r)
4977 return r;
4978 }
4979 } else {
4980 alu.op = ALU_OP1_EXP_IEEE;
4981 alu.src[0].sel = ctx->temp_reg;
4982 alu.src[0].chan = 0;
4983
4984 alu.dst.sel = ctx->temp_reg;
4985 alu.dst.chan = 0;
4986 alu.dst.write = 1;
4987 alu.last = 1;
4988 r = r600_bytecode_add_alu(ctx->bc, &alu);
4989 if (r)
4990 return r;
4991 }
4992 }
4993
4994 /* result.y = tmp - floor(tmp); */
4995 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
4996 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4997
4998 alu.op = ALU_OP1_FRACT;
4999 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5000
5001 alu.dst.sel = ctx->temp_reg;
5002 #if 0
5003 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5004 if (r)
5005 return r;
5006 #endif
5007 alu.dst.write = 1;
5008 alu.dst.chan = 1;
5009
5010 alu.last = 1;
5011
5012 r = r600_bytecode_add_alu(ctx->bc, &alu);
5013 if (r)
5014 return r;
5015 }
5016
5017 /* result.z = RoughApprox2ToX(tmp);*/
5018 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
5019 if (ctx->bc->chip_class == CAYMAN) {
5020 for (i = 0; i < 3; i++) {
5021 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5022 alu.op = ALU_OP1_EXP_IEEE;
5023 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5024
5025 alu.dst.sel = ctx->temp_reg;
5026 alu.dst.chan = i;
5027 if (i == 2) {
5028 alu.dst.write = 1;
5029 alu.last = 1;
5030 }
5031
5032 r = r600_bytecode_add_alu(ctx->bc, &alu);
5033 if (r)
5034 return r;
5035 }
5036 } else {
5037 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5038 alu.op = ALU_OP1_EXP_IEEE;
5039 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5040
5041 alu.dst.sel = ctx->temp_reg;
5042 alu.dst.write = 1;
5043 alu.dst.chan = 2;
5044
5045 alu.last = 1;
5046
5047 r = r600_bytecode_add_alu(ctx->bc, &alu);
5048 if (r)
5049 return r;
5050 }
5051 }
5052
5053 /* result.w = 1.0;*/
5054 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
5055 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5056
5057 alu.op = ALU_OP1_MOV;
5058 alu.src[0].sel = V_SQ_ALU_SRC_1;
5059 alu.src[0].chan = 0;
5060
5061 alu.dst.sel = ctx->temp_reg;
5062 alu.dst.chan = 3;
5063 alu.dst.write = 1;
5064 alu.last = 1;
5065 r = r600_bytecode_add_alu(ctx->bc, &alu);
5066 if (r)
5067 return r;
5068 }
5069 return tgsi_helper_copy(ctx, inst);
5070 }
5071
5072 static int tgsi_log(struct r600_shader_ctx *ctx)
5073 {
5074 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5075 struct r600_bytecode_alu alu;
5076 int r;
5077 int i;
5078
5079 /* result.x = floor(log2(|src|)); */
5080 if (inst->Dst[0].Register.WriteMask & 1) {
5081 if (ctx->bc->chip_class == CAYMAN) {
5082 for (i = 0; i < 3; i++) {
5083 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5084
5085 alu.op = ALU_OP1_LOG_IEEE;
5086 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5087 r600_bytecode_src_set_abs(&alu.src[0]);
5088
5089 alu.dst.sel = ctx->temp_reg;
5090 alu.dst.chan = i;
5091 if (i == 0)
5092 alu.dst.write = 1;
5093 if (i == 2)
5094 alu.last = 1;
5095 r = r600_bytecode_add_alu(ctx->bc, &alu);
5096 if (r)
5097 return r;
5098 }
5099
5100 } else {
5101 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5102
5103 alu.op = ALU_OP1_LOG_IEEE;
5104 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5105 r600_bytecode_src_set_abs(&alu.src[0]);
5106
5107 alu.dst.sel = ctx->temp_reg;
5108 alu.dst.chan = 0;
5109 alu.dst.write = 1;
5110 alu.last = 1;
5111 r = r600_bytecode_add_alu(ctx->bc, &alu);
5112 if (r)
5113 return r;
5114 }
5115
5116 alu.op = ALU_OP1_FLOOR;
5117 alu.src[0].sel = ctx->temp_reg;
5118 alu.src[0].chan = 0;
5119
5120 alu.dst.sel = ctx->temp_reg;
5121 alu.dst.chan = 0;
5122 alu.dst.write = 1;
5123 alu.last = 1;
5124
5125 r = r600_bytecode_add_alu(ctx->bc, &alu);
5126 if (r)
5127 return r;
5128 }
5129
5130 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
5131 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
5132
5133 if (ctx->bc->chip_class == CAYMAN) {
5134 for (i = 0; i < 3; i++) {
5135 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5136
5137 alu.op = ALU_OP1_LOG_IEEE;
5138 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5139 r600_bytecode_src_set_abs(&alu.src[0]);
5140
5141 alu.dst.sel = ctx->temp_reg;
5142 alu.dst.chan = i;
5143 if (i == 1)
5144 alu.dst.write = 1;
5145 if (i == 2)
5146 alu.last = 1;
5147
5148 r = r600_bytecode_add_alu(ctx->bc, &alu);
5149 if (r)
5150 return r;
5151 }
5152 } else {
5153 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5154
5155 alu.op = ALU_OP1_LOG_IEEE;
5156 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5157 r600_bytecode_src_set_abs(&alu.src[0]);
5158
5159 alu.dst.sel = ctx->temp_reg;
5160 alu.dst.chan = 1;
5161 alu.dst.write = 1;
5162 alu.last = 1;
5163
5164 r = r600_bytecode_add_alu(ctx->bc, &alu);
5165 if (r)
5166 return r;
5167 }
5168
5169 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5170
5171 alu.op = ALU_OP1_FLOOR;
5172 alu.src[0].sel = ctx->temp_reg;
5173 alu.src[0].chan = 1;
5174
5175 alu.dst.sel = ctx->temp_reg;
5176 alu.dst.chan = 1;
5177 alu.dst.write = 1;
5178 alu.last = 1;
5179
5180 r = r600_bytecode_add_alu(ctx->bc, &alu);
5181 if (r)
5182 return r;
5183
5184 if (ctx->bc->chip_class == CAYMAN) {
5185 for (i = 0; i < 3; i++) {
5186 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5187 alu.op = ALU_OP1_EXP_IEEE;
5188 alu.src[0].sel = ctx->temp_reg;
5189 alu.src[0].chan = 1;
5190
5191 alu.dst.sel = ctx->temp_reg;
5192 alu.dst.chan = i;
5193 if (i == 1)
5194 alu.dst.write = 1;
5195 if (i == 2)
5196 alu.last = 1;
5197
5198 r = r600_bytecode_add_alu(ctx->bc, &alu);
5199 if (r)
5200 return r;
5201 }
5202 } else {
5203 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5204 alu.op = ALU_OP1_EXP_IEEE;
5205 alu.src[0].sel = ctx->temp_reg;
5206 alu.src[0].chan = 1;
5207
5208 alu.dst.sel = ctx->temp_reg;
5209 alu.dst.chan = 1;
5210 alu.dst.write = 1;
5211 alu.last = 1;
5212
5213 r = r600_bytecode_add_alu(ctx->bc, &alu);
5214 if (r)
5215 return r;
5216 }
5217
5218 if (ctx->bc->chip_class == CAYMAN) {
5219 for (i = 0; i < 3; i++) {
5220 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5221 alu.op = ALU_OP1_RECIP_IEEE;
5222 alu.src[0].sel = ctx->temp_reg;
5223 alu.src[0].chan = 1;
5224
5225 alu.dst.sel = ctx->temp_reg;
5226 alu.dst.chan = i;
5227 if (i == 1)
5228 alu.dst.write = 1;
5229 if (i == 2)
5230 alu.last = 1;
5231
5232 r = r600_bytecode_add_alu(ctx->bc, &alu);
5233 if (r)
5234 return r;
5235 }
5236 } else {
5237 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5238 alu.op = ALU_OP1_RECIP_IEEE;
5239 alu.src[0].sel = ctx->temp_reg;
5240 alu.src[0].chan = 1;
5241
5242 alu.dst.sel = ctx->temp_reg;
5243 alu.dst.chan = 1;
5244 alu.dst.write = 1;
5245 alu.last = 1;
5246
5247 r = r600_bytecode_add_alu(ctx->bc, &alu);
5248 if (r)
5249 return r;
5250 }
5251
5252 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5253
5254 alu.op = ALU_OP2_MUL;
5255
5256 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5257 r600_bytecode_src_set_abs(&alu.src[0]);
5258
5259 alu.src[1].sel = ctx->temp_reg;
5260 alu.src[1].chan = 1;
5261
5262 alu.dst.sel = ctx->temp_reg;
5263 alu.dst.chan = 1;
5264 alu.dst.write = 1;
5265 alu.last = 1;
5266
5267 r = r600_bytecode_add_alu(ctx->bc, &alu);
5268 if (r)
5269 return r;
5270 }
5271
5272 /* result.z = log2(|src|);*/
5273 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
5274 if (ctx->bc->chip_class == CAYMAN) {
5275 for (i = 0; i < 3; i++) {
5276 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5277
5278 alu.op = ALU_OP1_LOG_IEEE;
5279 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5280 r600_bytecode_src_set_abs(&alu.src[0]);
5281
5282 alu.dst.sel = ctx->temp_reg;
5283 if (i == 2)
5284 alu.dst.write = 1;
5285 alu.dst.chan = i;
5286 if (i == 2)
5287 alu.last = 1;
5288
5289 r = r600_bytecode_add_alu(ctx->bc, &alu);
5290 if (r)
5291 return r;
5292 }
5293 } else {
5294 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5295
5296 alu.op = ALU_OP1_LOG_IEEE;
5297 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5298 r600_bytecode_src_set_abs(&alu.src[0]);
5299
5300 alu.dst.sel = ctx->temp_reg;
5301 alu.dst.write = 1;
5302 alu.dst.chan = 2;
5303 alu.last = 1;
5304
5305 r = r600_bytecode_add_alu(ctx->bc, &alu);
5306 if (r)
5307 return r;
5308 }
5309 }
5310
5311 /* result.w = 1.0; */
5312 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
5313 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5314
5315 alu.op = ALU_OP1_MOV;
5316 alu.src[0].sel = V_SQ_ALU_SRC_1;
5317 alu.src[0].chan = 0;
5318
5319 alu.dst.sel = ctx->temp_reg;
5320 alu.dst.chan = 3;
5321 alu.dst.write = 1;
5322 alu.last = 1;
5323
5324 r = r600_bytecode_add_alu(ctx->bc, &alu);
5325 if (r)
5326 return r;
5327 }
5328
5329 return tgsi_helper_copy(ctx, inst);
5330 }
5331
5332 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
5333 {
5334 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5335 struct r600_bytecode_alu alu;
5336 int r;
5337
5338 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5339
5340 switch (inst->Instruction.Opcode) {
5341 case TGSI_OPCODE_ARL:
5342 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
5343 break;
5344 case TGSI_OPCODE_ARR:
5345 alu.op = ALU_OP1_FLT_TO_INT;
5346 break;
5347 case TGSI_OPCODE_UARL:
5348 alu.op = ALU_OP1_MOV;
5349 break;
5350 default:
5351 assert(0);
5352 return -1;
5353 }
5354
5355 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5356 alu.last = 1;
5357 alu.dst.sel = ctx->bc->ar_reg;
5358 alu.dst.write = 1;
5359 r = r600_bytecode_add_alu(ctx->bc, &alu);
5360 if (r)
5361 return r;
5362
5363 ctx->bc->ar_loaded = 0;
5364 return 0;
5365 }
5366 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
5367 {
5368 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5369 struct r600_bytecode_alu alu;
5370 int r;
5371
5372 switch (inst->Instruction.Opcode) {
5373 case TGSI_OPCODE_ARL:
5374 memset(&alu, 0, sizeof(alu));
5375 alu.op = ALU_OP1_FLOOR;
5376 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5377 alu.dst.sel = ctx->bc->ar_reg;
5378 alu.dst.write = 1;
5379 alu.last = 1;
5380
5381 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5382 return r;
5383
5384 memset(&alu, 0, sizeof(alu));
5385 alu.op = ALU_OP1_FLT_TO_INT;
5386 alu.src[0].sel = ctx->bc->ar_reg;
5387 alu.dst.sel = ctx->bc->ar_reg;
5388 alu.dst.write = 1;
5389 alu.last = 1;
5390
5391 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5392 return r;
5393 break;
5394 case TGSI_OPCODE_ARR:
5395 memset(&alu, 0, sizeof(alu));
5396 alu.op = ALU_OP1_FLT_TO_INT;
5397 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5398 alu.dst.sel = ctx->bc->ar_reg;
5399 alu.dst.write = 1;
5400 alu.last = 1;
5401
5402 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5403 return r;
5404 break;
5405 case TGSI_OPCODE_UARL:
5406 memset(&alu, 0, sizeof(alu));
5407 alu.op = ALU_OP1_MOV;
5408 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5409 alu.dst.sel = ctx->bc->ar_reg;
5410 alu.dst.write = 1;
5411 alu.last = 1;
5412
5413 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
5414 return r;
5415 break;
5416 default:
5417 assert(0);
5418 return -1;
5419 }
5420
5421 ctx->bc->ar_loaded = 0;
5422 return 0;
5423 }
5424
5425 static int tgsi_opdst(struct r600_shader_ctx *ctx)
5426 {
5427 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5428 struct r600_bytecode_alu alu;
5429 int i, r = 0;
5430
5431 for (i = 0; i < 4; i++) {
5432 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5433
5434 alu.op = ALU_OP2_MUL;
5435 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5436
5437 if (i == 0 || i == 3) {
5438 alu.src[0].sel = V_SQ_ALU_SRC_1;
5439 } else {
5440 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5441 }
5442
5443 if (i == 0 || i == 2) {
5444 alu.src[1].sel = V_SQ_ALU_SRC_1;
5445 } else {
5446 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
5447 }
5448 if (i == 3)
5449 alu.last = 1;
5450 r = r600_bytecode_add_alu(ctx->bc, &alu);
5451 if (r)
5452 return r;
5453 }
5454 return 0;
5455 }
5456
5457 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
5458 {
5459 struct r600_bytecode_alu alu;
5460 int r;
5461
5462 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5463 alu.op = opcode;
5464 alu.execute_mask = 1;
5465 alu.update_pred = 1;
5466
5467 alu.dst.sel = ctx->temp_reg;
5468 alu.dst.write = 1;
5469 alu.dst.chan = 0;
5470
5471 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
5472 alu.src[1].sel = V_SQ_ALU_SRC_0;
5473 alu.src[1].chan = 0;
5474
5475 alu.last = 1;
5476
5477 r = r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_PUSH_BEFORE);
5478 if (r)
5479 return r;
5480 return 0;
5481 }
5482
5483 static int pops(struct r600_shader_ctx *ctx, int pops)
5484 {
5485 unsigned force_pop = ctx->bc->force_add_cf;
5486
5487 if (!force_pop) {
5488 int alu_pop = 3;
5489 if (ctx->bc->cf_last) {
5490 if (ctx->bc->cf_last->op == CF_OP_ALU)
5491 alu_pop = 0;
5492 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
5493 alu_pop = 1;
5494 }
5495 alu_pop += pops;
5496 if (alu_pop == 1) {
5497 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
5498 ctx->bc->force_add_cf = 1;
5499 } else if (alu_pop == 2) {
5500 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
5501 ctx->bc->force_add_cf = 1;
5502 } else {
5503 force_pop = 1;
5504 }
5505 }
5506
5507 if (force_pop) {
5508 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
5509 ctx->bc->cf_last->pop_count = pops;
5510 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
5511 }
5512
5513 return 0;
5514 }
5515
5516 static inline void callstack_decrease_current(struct r600_shader_ctx *ctx, unsigned reason)
5517 {
5518 switch(reason) {
5519 case FC_PUSH_VPM:
5520 ctx->bc->callstack[ctx->bc->call_sp].current--;
5521 break;
5522 case FC_PUSH_WQM:
5523 case FC_LOOP:
5524 ctx->bc->callstack[ctx->bc->call_sp].current -= 4;
5525 break;
5526 case FC_REP:
5527 /* TOODO : for 16 vp asic should -= 2; */
5528 ctx->bc->callstack[ctx->bc->call_sp].current --;
5529 break;
5530 }
5531 }
5532
5533 static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only)
5534 {
5535 if (check_max_only) {
5536 int diff;
5537 switch (reason) {
5538 case FC_PUSH_VPM:
5539 diff = 1;
5540 break;
5541 case FC_PUSH_WQM:
5542 diff = 4;
5543 break;
5544 default:
5545 assert(0);
5546 diff = 0;
5547 }
5548 if ((ctx->bc->callstack[ctx->bc->call_sp].current + diff) >
5549 ctx->bc->callstack[ctx->bc->call_sp].max) {
5550 ctx->bc->callstack[ctx->bc->call_sp].max =
5551 ctx->bc->callstack[ctx->bc->call_sp].current + diff;
5552 }
5553 return;
5554 }
5555 switch (reason) {
5556 case FC_PUSH_VPM:
5557 ctx->bc->callstack[ctx->bc->call_sp].current++;
5558 break;
5559 case FC_PUSH_WQM:
5560 case FC_LOOP:
5561 ctx->bc->callstack[ctx->bc->call_sp].current += 4;
5562 break;
5563 case FC_REP:
5564 ctx->bc->callstack[ctx->bc->call_sp].current++;
5565 break;
5566 }
5567
5568 if ((ctx->bc->callstack[ctx->bc->call_sp].current) >
5569 ctx->bc->callstack[ctx->bc->call_sp].max) {
5570 ctx->bc->callstack[ctx->bc->call_sp].max =
5571 ctx->bc->callstack[ctx->bc->call_sp].current;
5572 }
5573 }
5574
5575 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
5576 {
5577 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
5578
5579 sp->mid = realloc((void *)sp->mid,
5580 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
5581 sp->mid[sp->num_mid] = ctx->bc->cf_last;
5582 sp->num_mid++;
5583 }
5584
5585 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
5586 {
5587 ctx->bc->fc_sp++;
5588 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
5589 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
5590 }
5591
5592 static void fc_poplevel(struct r600_shader_ctx *ctx)
5593 {
5594 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
5595 free(sp->mid);
5596 sp->mid = NULL;
5597 sp->num_mid = 0;
5598 sp->start = NULL;
5599 sp->type = 0;
5600 ctx->bc->fc_sp--;
5601 }
5602
#if 0
/* Disabled scaffolding for subroutine/RET support (see the XXX notes).
 * Fixed the stray ')' typos in emit_return()/emit_jump_to_offset() so
 * this block compiles if it is ever re-enabled. */
static int emit_return(struct r600_shader_ctx *ctx)
{
	r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
	return 0;
}

static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
{

	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
	ctx->bc->cf_last->pop_count = pops;
	/* XXX work out offset */
	return 0;
}

static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
{
	return 0;
}

static void emit_testflag(struct r600_shader_ctx *ctx)
{

}

static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
{
	emit_testflag(ctx);
	emit_jump_to_offset(ctx, 1, 4);
	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
	pops(ctx, ifidx + 1);
	emit_return(ctx);
}

static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
{
	emit_testflag(ctx);

	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fc_sp);

	pops(ctx, 1);
}
#endif
5650
5651 static int tgsi_if(struct r600_shader_ctx *ctx)
5652 {
5653 emit_logic_pred(ctx, ALU_OP2_PRED_SETNE_INT);
5654
5655 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
5656
5657 fc_pushlevel(ctx, FC_IF);
5658
5659 callstack_check_depth(ctx, FC_PUSH_VPM, 0);
5660 return 0;
5661 }
5662
5663 static int tgsi_else(struct r600_shader_ctx *ctx)
5664 {
5665 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
5666 ctx->bc->cf_last->pop_count = 1;
5667
5668 fc_set_mid(ctx, ctx->bc->fc_sp);
5669 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
5670 return 0;
5671 }
5672
5673 static int tgsi_endif(struct r600_shader_ctx *ctx)
5674 {
5675 pops(ctx, 1);
5676 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
5677 R600_ERR("if/endif unbalanced in shader\n");
5678 return -1;
5679 }
5680
5681 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
5682 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
5683 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
5684 } else {
5685 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
5686 }
5687 fc_poplevel(ctx);
5688
5689 callstack_decrease_current(ctx, FC_PUSH_VPM);
5690 return 0;
5691 }
5692
5693 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
5694 {
5695 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
5696 * limited to 4096 iterations, like the other LOOP_* instructions. */
5697 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
5698
5699 fc_pushlevel(ctx, FC_LOOP);
5700
5701 /* check stack depth */
5702 callstack_check_depth(ctx, FC_LOOP, 0);
5703 return 0;
5704 }
5705
5706 static int tgsi_endloop(struct r600_shader_ctx *ctx)
5707 {
5708 int i;
5709
5710 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
5711
5712 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
5713 R600_ERR("loop/endloop in shader code are not paired.\n");
5714 return -EINVAL;
5715 }
5716
5717 /* fixup loop pointers - from r600isa
5718 LOOP END points to CF after LOOP START,
5719 LOOP START point to CF after LOOP END
5720 BRK/CONT point to LOOP END CF
5721 */
5722 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
5723
5724 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
5725
5726 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
5727 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
5728 }
5729 /* XXX add LOOPRET support */
5730 fc_poplevel(ctx);
5731 callstack_decrease_current(ctx, FC_LOOP);
5732 return 0;
5733 }
5734
5735 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
5736 {
5737 unsigned int fscp;
5738
5739 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
5740 {
5741 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
5742 break;
5743 }
5744
5745 if (fscp == 0) {
5746 R600_ERR("Break not inside loop/endloop pair\n");
5747 return -EINVAL;
5748 }
5749
5750 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
5751
5752 fc_set_mid(ctx, fscp);
5753
5754 callstack_check_depth(ctx, FC_PUSH_VPM, 1);
5755 return 0;
5756 }
5757
5758 static int tgsi_umad(struct r600_shader_ctx *ctx)
5759 {
5760 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5761 struct r600_bytecode_alu alu;
5762 int i, j, r;
5763 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5764
5765 /* src0 * src1 */
5766 for (i = 0; i < lasti + 1; i++) {
5767 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5768 continue;
5769
5770 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5771
5772 alu.dst.chan = i;
5773 alu.dst.sel = ctx->temp_reg;
5774 alu.dst.write = 1;
5775
5776 alu.op = ALU_OP2_MULLO_UINT;
5777 for (j = 0; j < 2; j++) {
5778 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
5779 }
5780
5781 alu.last = 1;
5782 r = r600_bytecode_add_alu(ctx->bc, &alu);
5783 if (r)
5784 return r;
5785 }
5786
5787
5788 for (i = 0; i < lasti + 1; i++) {
5789 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5790 continue;
5791
5792 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5793 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5794
5795 alu.op = ALU_OP2_ADD_INT;
5796
5797 alu.src[0].sel = ctx->temp_reg;
5798 alu.src[0].chan = i;
5799
5800 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5801 if (i == lasti) {
5802 alu.last = 1;
5803 }
5804 r = r600_bytecode_add_alu(ctx->bc, &alu);
5805 if (r)
5806 return r;
5807 }
5808 return 0;
5809 }
5810
/* TGSI opcode dispatch table for R600/R700 (pre-evergreen) chips.
 * Entry layout: {tgsi opcode, flag (set only for MAD — presumably marks
 * 3-source ALU ops, confirm against struct r600_shader_tgsi_instruction),
 * hardware ALU/FETCH/CF op, emit handler}.
 * NOTE(review): entries appear to be kept in TGSI opcode order with
 * numeric "gap" fillers for unused opcode values — do not reorder. */
static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL, 0, ALU_OP0_NOP, tgsi_r600_arl},
	{TGSI_OPCODE_MOV, 0, ALU_OP1_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT, 0, ALU_OP0_NOP, tgsi_lit},

	/* XXX:
	 * For state trackers other than OpenGL, we'll want to use
	 * _RECIP_IEEE instead.
	 */
	{TGSI_OPCODE_RCP, 0, ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},

	{TGSI_OPCODE_RSQ, 0, ALU_OP0_NOP, tgsi_rsq},
	{TGSI_OPCODE_EXP, 0, ALU_OP0_NOP, tgsi_exp},
	{TGSI_OPCODE_LOG, 0, ALU_OP0_NOP, tgsi_log},
	{TGSI_OPCODE_MUL, 0, ALU_OP2_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD, 0, ALU_OP2_ADD, tgsi_op2},
	{TGSI_OPCODE_DP3, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST, 0, ALU_OP0_NOP, tgsi_opdst},
	{TGSI_OPCODE_MIN, 0, ALU_OP2_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX, 0, ALU_OP2_MAX, tgsi_op2},
	{TGSI_OPCODE_SLT, 0, ALU_OP2_SETGT, tgsi_op2_swap},
	{TGSI_OPCODE_SGE, 0, ALU_OP2_SETGE, tgsi_op2},
	{TGSI_OPCODE_MAD, 1, ALU_OP3_MULADD, tgsi_op3},
	{TGSI_OPCODE_SUB, 0, ALU_OP2_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP, 0, ALU_OP0_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{20, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{22, 0, ALU_OP0_NOP, tgsi_unsupported},
	{23, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC, 0, ALU_OP1_FRACT, tgsi_op2},
	{TGSI_OPCODE_CLAMP, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR, 0, ALU_OP1_FLOOR, tgsi_op2},
	{TGSI_OPCODE_ROUND, 0, ALU_OP1_RNDNE, tgsi_op2},
	{TGSI_OPCODE_EX2, 0, ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2, 0, ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_POW, 0, ALU_OP0_NOP, tgsi_pow},
	{TGSI_OPCODE_XPD, 0, ALU_OP0_NOP, tgsi_xpd},
	/* gap */
	{32, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ABS, 0, ALU_OP1_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_COS, 0, ALU_OP1_COS, tgsi_trig},
	{TGSI_OPCODE_DDX, 0, FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	{TGSI_OPCODE_DDY, 0, FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	{TGSI_OPCODE_KILP, 0, ALU_OP2_KILLGT, tgsi_kill},  /* predicated kill */
	{TGSI_OPCODE_PK2H, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ, 0, ALU_OP2_SETE, tgsi_op2},
	{TGSI_OPCODE_SFL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT, 0, ALU_OP2_SETGT, tgsi_op2},
	{TGSI_OPCODE_SIN, 0, ALU_OP1_SIN, tgsi_trig},
	{TGSI_OPCODE_SLE, 0, ALU_OP2_SETGE, tgsi_op2_swap},
	{TGSI_OPCODE_SNE, 0, ALU_OP2_SETNE, tgsi_op2},
	{TGSI_OPCODE_STR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXD, 0, FETCH_OP_SAMPLE_G, tgsi_tex},
	{TGSI_OPCODE_TXP, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_UP2H, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR, 0, ALU_OP0_NOP, tgsi_r600_arl},
	{TGSI_OPCODE_BRA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG, 0, ALU_OP0_NOP, tgsi_ssg},
	{TGSI_OPCODE_CMP, 0, ALU_OP0_NOP, tgsi_cmp},
	{TGSI_OPCODE_SCS, 0, ALU_OP0_NOP, tgsi_scs},
	{TGSI_OPCODE_TXB, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_NRM, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_BRK, 0, CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	{TGSI_OPCODE_IF, 0, ALU_OP0_NOP, tgsi_if},
	/* gap */
	{75, 0, ALU_OP0_NOP, tgsi_unsupported},
	{76, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE, 0, ALU_OP0_NOP, tgsi_else},
	{TGSI_OPCODE_ENDIF, 0, ALU_OP0_NOP, tgsi_endif},
	/* gap */
	{79, 0, ALU_OP0_NOP, tgsi_unsupported},
	{80, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL, 0, ALU_OP1_CEIL, tgsi_op2},
	{TGSI_OPCODE_I2F, 0, ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_NOT, 0, ALU_OP1_NOT_INT, tgsi_op2},
	{TGSI_OPCODE_TRUNC, 0, ALU_OP1_TRUNC, tgsi_op2},
	{TGSI_OPCODE_SHL, 0, ALU_OP2_LSHL_INT, tgsi_op2_trans},
	/* gap */
	{88, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND, 0, ALU_OP2_AND_INT, tgsi_op2},
	{TGSI_OPCODE_OR, 0, ALU_OP2_OR_INT, tgsi_op2},
	{TGSI_OPCODE_MOD, 0, ALU_OP0_NOP, tgsi_imod},
	{TGSI_OPCODE_XOR, 0, ALU_OP2_XOR_INT, tgsi_op2},
	{TGSI_OPCODE_SAD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF, 0, FETCH_OP_LD, tgsi_tex},
	{TGSI_OPCODE_TXQ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	{TGSI_OPCODE_CONT, 0, CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	{TGSI_OPCODE_EMIT, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP, 0, ALU_OP0_NOP, tgsi_bgnloop},
	{TGSI_OPCODE_BGNSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP, 0, ALU_OP0_NOP, tgsi_endloop},
	{TGSI_OPCODE_ENDSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ_LZ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	/* gap */
	{104, 0, ALU_OP0_NOP, tgsi_unsupported},
	{105, 0, ALU_OP0_NOP, tgsi_unsupported},
	{106, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{108, 0, ALU_OP0_NOP, tgsi_unsupported},
	{109, 0, ALU_OP0_NOP, tgsi_unsupported},
	{110, 0, ALU_OP0_NOP, tgsi_unsupported},
	{111, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL, 0, ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
	{TGSI_OPCODE_END, 0, ALU_OP0_NOP, tgsi_end},  /* aka HALT */
	/* gap */
	{118, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I, 0, ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	{TGSI_OPCODE_IDIV, 0, ALU_OP0_NOP, tgsi_idiv},
	{TGSI_OPCODE_IMAX, 0, ALU_OP2_MAX_INT, tgsi_op2},
	{TGSI_OPCODE_IMIN, 0, ALU_OP2_MIN_INT, tgsi_op2},
	{TGSI_OPCODE_INEG, 0, ALU_OP2_SUB_INT, tgsi_ineg},
	{TGSI_OPCODE_ISGE, 0, ALU_OP2_SETGE_INT, tgsi_op2},
	{TGSI_OPCODE_ISHR, 0, ALU_OP2_ASHR_INT, tgsi_op2_trans},
	{TGSI_OPCODE_ISLT, 0, ALU_OP2_SETGT_INT, tgsi_op2_swap},
	{TGSI_OPCODE_F2U, 0, ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	{TGSI_OPCODE_U2F, 0, ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_UADD, 0, ALU_OP2_ADD_INT, tgsi_op2},
	{TGSI_OPCODE_UDIV, 0, ALU_OP0_NOP, tgsi_udiv},
	{TGSI_OPCODE_UMAD, 0, ALU_OP0_NOP, tgsi_umad},
	{TGSI_OPCODE_UMAX, 0, ALU_OP2_MAX_UINT, tgsi_op2},
	{TGSI_OPCODE_UMIN, 0, ALU_OP2_MIN_UINT, tgsi_op2},
	{TGSI_OPCODE_UMOD, 0, ALU_OP0_NOP, tgsi_umod},
	{TGSI_OPCODE_UMUL, 0, ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	{TGSI_OPCODE_USEQ, 0, ALU_OP2_SETE_INT, tgsi_op2},
	{TGSI_OPCODE_USGE, 0, ALU_OP2_SETGE_UINT, tgsi_op2},
	{TGSI_OPCODE_USHR, 0, ALU_OP2_LSHR_INT, tgsi_op2_trans},
	{TGSI_OPCODE_USLT, 0, ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	{TGSI_OPCODE_USNE, 0, ALU_OP2_SETNE_INT, tgsi_op2_swap},
	{TGSI_OPCODE_SWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_UARL, 0, ALU_OP1_MOVA_INT, tgsi_r600_arl},
	{TGSI_OPCODE_UCMP, 0, ALU_OP0_NOP, tgsi_ucmp},
	{TGSI_OPCODE_IABS, 0, 0, tgsi_iabs},
	{TGSI_OPCODE_ISSG, 0, 0, tgsi_issg},
	{TGSI_OPCODE_LOAD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_STORE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BARRIER, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUADD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXCHG, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMCAS, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMAND, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMOR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXOR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX2, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXB2, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_TXL2, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_LAST, 0, ALU_OP0_NOP, tgsi_unsupported},
};
6009
/* TGSI opcode dispatch table for Evergreen chips.  Same layout and
 * ordering rules as r600_shader_tgsi_instruction above; differs from it
 * in using tgsi_eg_arl for ARL/ARR/UARL, the IEEE RCP/RSQ variants,
 * tgsi_f2i for F2I/F2U, and plain tgsi_op2 (no trans-slot lowering) for
 * the integer shift ops. */
static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	{TGSI_OPCODE_ARL, 0, ALU_OP0_NOP, tgsi_eg_arl},
	{TGSI_OPCODE_MOV, 0, ALU_OP1_MOV, tgsi_op2},
	{TGSI_OPCODE_LIT, 0, ALU_OP0_NOP, tgsi_lit},
	{TGSI_OPCODE_RCP, 0, ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_RSQ, 0, ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
	{TGSI_OPCODE_EXP, 0, ALU_OP0_NOP, tgsi_exp},
	{TGSI_OPCODE_LOG, 0, ALU_OP0_NOP, tgsi_log},
	{TGSI_OPCODE_MUL, 0, ALU_OP2_MUL, tgsi_op2},
	{TGSI_OPCODE_ADD, 0, ALU_OP2_ADD, tgsi_op2},
	{TGSI_OPCODE_DP3, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_DP4, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_DST, 0, ALU_OP0_NOP, tgsi_opdst},
	{TGSI_OPCODE_MIN, 0, ALU_OP2_MIN, tgsi_op2},
	{TGSI_OPCODE_MAX, 0, ALU_OP2_MAX, tgsi_op2},
	{TGSI_OPCODE_SLT, 0, ALU_OP2_SETGT, tgsi_op2_swap},
	{TGSI_OPCODE_SGE, 0, ALU_OP2_SETGE, tgsi_op2},
	{TGSI_OPCODE_MAD, 1, ALU_OP3_MULADD, tgsi_op3},
	{TGSI_OPCODE_SUB, 0, ALU_OP2_ADD, tgsi_op2},
	{TGSI_OPCODE_LRP, 0, ALU_OP0_NOP, tgsi_lrp},
	{TGSI_OPCODE_CND, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{20, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2A, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{22, 0, ALU_OP0_NOP, tgsi_unsupported},
	{23, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FRC, 0, ALU_OP1_FRACT, tgsi_op2},
	{TGSI_OPCODE_CLAMP, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_FLR, 0, ALU_OP1_FLOOR, tgsi_op2},
	{TGSI_OPCODE_ROUND, 0, ALU_OP1_RNDNE, tgsi_op2},
	{TGSI_OPCODE_EX2, 0, ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_LG2, 0, ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	{TGSI_OPCODE_POW, 0, ALU_OP0_NOP, tgsi_pow},
	{TGSI_OPCODE_XPD, 0, ALU_OP0_NOP, tgsi_xpd},
	/* gap */
	{32, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ABS, 0, ALU_OP1_MOV, tgsi_op2},
	{TGSI_OPCODE_RCC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DPH, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_COS, 0, ALU_OP1_COS, tgsi_trig},
	{TGSI_OPCODE_DDX, 0, FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	{TGSI_OPCODE_DDY, 0, FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	{TGSI_OPCODE_KILP, 0, ALU_OP2_KILLGT, tgsi_kill},  /* predicated kill */
	{TGSI_OPCODE_PK2H, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK2US, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4B, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PK4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RFL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SEQ, 0, ALU_OP2_SETE, tgsi_op2},
	{TGSI_OPCODE_SFL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SGT, 0, ALU_OP2_SETGT, tgsi_op2},
	{TGSI_OPCODE_SIN, 0, ALU_OP1_SIN, tgsi_trig},
	{TGSI_OPCODE_SLE, 0, ALU_OP2_SETGE, tgsi_op2_swap},
	{TGSI_OPCODE_SNE, 0, ALU_OP2_SETNE, tgsi_op2},
	{TGSI_OPCODE_STR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXD, 0, FETCH_OP_SAMPLE_G, tgsi_tex},
	{TGSI_OPCODE_TXP, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_UP2H, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP2US, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4B, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_UP4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_X2D, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ARR, 0, ALU_OP0_NOP, tgsi_eg_arl},
	{TGSI_OPCODE_BRA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CAL, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_RET, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SSG, 0, ALU_OP0_NOP, tgsi_ssg},
	{TGSI_OPCODE_CMP, 0, ALU_OP0_NOP, tgsi_cmp},
	{TGSI_OPCODE_SCS, 0, ALU_OP0_NOP, tgsi_scs},
	{TGSI_OPCODE_TXB, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_NRM, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DIV, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DP2, 0, ALU_OP2_DOT4, tgsi_dp},
	{TGSI_OPCODE_TXL, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_BRK, 0, CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	{TGSI_OPCODE_IF, 0, ALU_OP0_NOP, tgsi_if},
	/* gap */
	{75, 0, ALU_OP0_NOP, tgsi_unsupported},
	{76, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ELSE, 0, ALU_OP0_NOP, tgsi_else},
	{TGSI_OPCODE_ENDIF, 0, ALU_OP0_NOP, tgsi_endif},
	/* gap */
	{79, 0, ALU_OP0_NOP, tgsi_unsupported},
	{80, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_PUSHA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_POPA, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CEIL, 0, ALU_OP1_CEIL, tgsi_op2},
	{TGSI_OPCODE_I2F, 0, ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_NOT, 0, ALU_OP1_NOT_INT, tgsi_op2},
	{TGSI_OPCODE_TRUNC, 0, ALU_OP1_TRUNC, tgsi_op2},
	{TGSI_OPCODE_SHL, 0, ALU_OP2_LSHL_INT, tgsi_op2},
	/* gap */
	{88, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_AND, 0, ALU_OP2_AND_INT, tgsi_op2},
	{TGSI_OPCODE_OR, 0, ALU_OP2_OR_INT, tgsi_op2},
	{TGSI_OPCODE_MOD, 0, ALU_OP0_NOP, tgsi_imod},
	{TGSI_OPCODE_XOR, 0, ALU_OP2_XOR_INT, tgsi_op2},
	{TGSI_OPCODE_SAD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXF, 0, FETCH_OP_LD, tgsi_tex},
	{TGSI_OPCODE_TXQ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	{TGSI_OPCODE_CONT, 0, CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	{TGSI_OPCODE_EMIT, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDPRIM, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BGNLOOP, 0, ALU_OP0_NOP, tgsi_bgnloop},
	{TGSI_OPCODE_BGNSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDLOOP, 0, ALU_OP0_NOP, tgsi_endloop},
	{TGSI_OPCODE_ENDSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TXQ_LZ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	/* gap */
	{104, 0, ALU_OP0_NOP, tgsi_unsupported},
	{105, 0, ALU_OP0_NOP, tgsi_unsupported},
	{106, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NOP, 0, ALU_OP0_NOP, tgsi_unsupported},
	/* gap */
	{108, 0, ALU_OP0_NOP, tgsi_unsupported},
	{109, 0, ALU_OP0_NOP, tgsi_unsupported},
	{110, 0, ALU_OP0_NOP, tgsi_unsupported},
	{111, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_NRM4, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CALLNZ, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_IFC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BREAKC, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_KIL, 0, ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
	{TGSI_OPCODE_END, 0, ALU_OP0_NOP, tgsi_end},  /* aka HALT */
	/* gap */
	{118, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_F2I, 0, ALU_OP1_FLT_TO_INT, tgsi_f2i},
	{TGSI_OPCODE_IDIV, 0, ALU_OP0_NOP, tgsi_idiv},
	{TGSI_OPCODE_IMAX, 0, ALU_OP2_MAX_INT, tgsi_op2},
	{TGSI_OPCODE_IMIN, 0, ALU_OP2_MIN_INT, tgsi_op2},
	{TGSI_OPCODE_INEG, 0, ALU_OP2_SUB_INT, tgsi_ineg},
	{TGSI_OPCODE_ISGE, 0, ALU_OP2_SETGE_INT, tgsi_op2},
	{TGSI_OPCODE_ISHR, 0, ALU_OP2_ASHR_INT, tgsi_op2},
	{TGSI_OPCODE_ISLT, 0, ALU_OP2_SETGT_INT, tgsi_op2_swap},
	{TGSI_OPCODE_F2U, 0, ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	{TGSI_OPCODE_U2F, 0, ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	{TGSI_OPCODE_UADD, 0, ALU_OP2_ADD_INT, tgsi_op2},
	{TGSI_OPCODE_UDIV, 0, ALU_OP0_NOP, tgsi_udiv},
	{TGSI_OPCODE_UMAD, 0, ALU_OP0_NOP, tgsi_umad},
	{TGSI_OPCODE_UMAX, 0, ALU_OP2_MAX_UINT, tgsi_op2},
	{TGSI_OPCODE_UMIN, 0, ALU_OP2_MIN_UINT, tgsi_op2},
	{TGSI_OPCODE_UMOD, 0, ALU_OP0_NOP, tgsi_umod},
	{TGSI_OPCODE_UMUL, 0, ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	{TGSI_OPCODE_USEQ, 0, ALU_OP2_SETE_INT, tgsi_op2},
	{TGSI_OPCODE_USGE, 0, ALU_OP2_SETGE_UINT, tgsi_op2},
	{TGSI_OPCODE_USHR, 0, ALU_OP2_LSHR_INT, tgsi_op2},
	{TGSI_OPCODE_USLT, 0, ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	{TGSI_OPCODE_USNE, 0, ALU_OP2_SETNE_INT, tgsi_op2},
	{TGSI_OPCODE_SWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_CASE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_DEFAULT, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ENDSWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
	{TGSI_OPCODE_UARL, 0, ALU_OP1_MOVA_INT, tgsi_eg_arl},
	{TGSI_OPCODE_UCMP, 0, ALU_OP0_NOP, tgsi_ucmp},
	{TGSI_OPCODE_IABS, 0, 0, tgsi_iabs},
	{TGSI_OPCODE_ISSG, 0, 0, tgsi_issg},
	{TGSI_OPCODE_LOAD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_STORE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_MFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_LFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_SFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_BARRIER, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUADD, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXCHG, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMCAS, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMAND, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMOR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMXOR, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMUMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_ATOMIMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
	{TGSI_OPCODE_TEX2, 0, FETCH_OP_SAMPLE, tgsi_tex},
	{TGSI_OPCODE_TXB2, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
	{TGSI_OPCODE_TXL2, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
	{TGSI_OPCODE_LAST, 0, ALU_OP0_NOP, tgsi_unsupported},
};
6202
6203 static struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
6204 {TGSI_OPCODE_ARL, 0, ALU_OP0_NOP, tgsi_eg_arl},
6205 {TGSI_OPCODE_MOV, 0, ALU_OP1_MOV, tgsi_op2},
6206 {TGSI_OPCODE_LIT, 0, ALU_OP0_NOP, tgsi_lit},
6207 {TGSI_OPCODE_RCP, 0, ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
6208 {TGSI_OPCODE_RSQ, 0, ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
6209 {TGSI_OPCODE_EXP, 0, ALU_OP0_NOP, tgsi_exp},
6210 {TGSI_OPCODE_LOG, 0, ALU_OP0_NOP, tgsi_log},
6211 {TGSI_OPCODE_MUL, 0, ALU_OP2_MUL, tgsi_op2},
6212 {TGSI_OPCODE_ADD, 0, ALU_OP2_ADD, tgsi_op2},
6213 {TGSI_OPCODE_DP3, 0, ALU_OP2_DOT4, tgsi_dp},
6214 {TGSI_OPCODE_DP4, 0, ALU_OP2_DOT4, tgsi_dp},
6215 {TGSI_OPCODE_DST, 0, ALU_OP0_NOP, tgsi_opdst},
6216 {TGSI_OPCODE_MIN, 0, ALU_OP2_MIN, tgsi_op2},
6217 {TGSI_OPCODE_MAX, 0, ALU_OP2_MAX, tgsi_op2},
6218 {TGSI_OPCODE_SLT, 0, ALU_OP2_SETGT, tgsi_op2_swap},
6219 {TGSI_OPCODE_SGE, 0, ALU_OP2_SETGE, tgsi_op2},
6220 {TGSI_OPCODE_MAD, 1, ALU_OP3_MULADD, tgsi_op3},
6221 {TGSI_OPCODE_SUB, 0, ALU_OP2_ADD, tgsi_op2},
6222 {TGSI_OPCODE_LRP, 0, ALU_OP0_NOP, tgsi_lrp},
6223 {TGSI_OPCODE_CND, 0, ALU_OP0_NOP, tgsi_unsupported},
6224 /* gap */
6225 {20, 0, ALU_OP0_NOP, tgsi_unsupported},
/* NOTE(review): continuation of a per-TGSI-opcode dispatch table whose
 * declaration is above this chunk.  Each entry appears to be
 * {tgsi_opcode, <flag>, r600 ISA op, emit handler} -- confirm field names
 * against the struct definition.  Entries are kept in TGSI opcode order so
 * the table can be indexed directly by opcode; the "gap" slots are filled
 * with raw opcode numbers to preserve that indexing.  The cayman_* handlers
 * (cayman_trig, cayman_pow, ...) suggest this is the Cayman-specific table;
 * verify against the sibling R600/Evergreen tables elsewhere in the file. */
6226 {TGSI_OPCODE_DP2A, 0, ALU_OP0_NOP, tgsi_unsupported},
6227 /* gap */
6228 {22, 0, ALU_OP0_NOP, tgsi_unsupported},
6229 {23, 0, ALU_OP0_NOP, tgsi_unsupported},
6230 {TGSI_OPCODE_FRC, 0, ALU_OP1_FRACT, tgsi_op2},
6231 {TGSI_OPCODE_CLAMP, 0, ALU_OP0_NOP, tgsi_unsupported},
6232 {TGSI_OPCODE_FLR, 0, ALU_OP1_FLOOR, tgsi_op2},
6233 {TGSI_OPCODE_ROUND, 0, ALU_OP1_RNDNE, tgsi_op2},
6234 {TGSI_OPCODE_EX2, 0, ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
6235 {TGSI_OPCODE_LG2, 0, ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
6236 {TGSI_OPCODE_POW, 0, ALU_OP0_NOP, cayman_pow},
6237 {TGSI_OPCODE_XPD, 0, ALU_OP0_NOP, tgsi_xpd},
6238 /* gap */
6239 {32, 0, ALU_OP0_NOP, tgsi_unsupported},
/* ABS is lowered to a plain MOV; presumably tgsi_op2 applies the source
 * absolute-value modifier -- confirm in the handler. */
6240 {TGSI_OPCODE_ABS, 0, ALU_OP1_MOV, tgsi_op2},
6241 {TGSI_OPCODE_RCC, 0, ALU_OP0_NOP, tgsi_unsupported},
6242 {TGSI_OPCODE_DPH, 0, ALU_OP2_DOT4, tgsi_dp},
6243 {TGSI_OPCODE_COS, 0, ALU_OP1_COS, cayman_trig},
6244 {TGSI_OPCODE_DDX, 0, FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
6245 {TGSI_OPCODE_DDY, 0, FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
6246 {TGSI_OPCODE_KILP, 0, ALU_OP2_KILLGT, tgsi_kill}, /* predicated kill */
6247 {TGSI_OPCODE_PK2H, 0, ALU_OP0_NOP, tgsi_unsupported},
6248 {TGSI_OPCODE_PK2US, 0, ALU_OP0_NOP, tgsi_unsupported},
6249 {TGSI_OPCODE_PK4B, 0, ALU_OP0_NOP, tgsi_unsupported},
6250 {TGSI_OPCODE_PK4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
6251 {TGSI_OPCODE_RFL, 0, ALU_OP0_NOP, tgsi_unsupported},
6252 {TGSI_OPCODE_SEQ, 0, ALU_OP2_SETE, tgsi_op2},
6253 {TGSI_OPCODE_SFL, 0, ALU_OP0_NOP, tgsi_unsupported},
6254 {TGSI_OPCODE_SGT, 0, ALU_OP2_SETGT, tgsi_op2},
6255 {TGSI_OPCODE_SIN, 0, ALU_OP1_SIN, cayman_trig},
/* No SETLE on the hardware: SLE becomes SETGE with swapped operands
 * (hence the tgsi_op2_swap handler). */
6256 {TGSI_OPCODE_SLE, 0, ALU_OP2_SETGE, tgsi_op2_swap},
6257 {TGSI_OPCODE_SNE, 0, ALU_OP2_SETNE, tgsi_op2},
6258 {TGSI_OPCODE_STR, 0, ALU_OP0_NOP, tgsi_unsupported},
6259 {TGSI_OPCODE_TEX, 0, FETCH_OP_SAMPLE, tgsi_tex},
6260 {TGSI_OPCODE_TXD, 0, FETCH_OP_SAMPLE_G, tgsi_tex},
6261 {TGSI_OPCODE_TXP, 0, FETCH_OP_SAMPLE, tgsi_tex},
6262 {TGSI_OPCODE_UP2H, 0, ALU_OP0_NOP, tgsi_unsupported},
6263 {TGSI_OPCODE_UP2US, 0, ALU_OP0_NOP, tgsi_unsupported},
6264 {TGSI_OPCODE_UP4B, 0, ALU_OP0_NOP, tgsi_unsupported},
6265 {TGSI_OPCODE_UP4UB, 0, ALU_OP0_NOP, tgsi_unsupported},
6266 {TGSI_OPCODE_X2D, 0, ALU_OP0_NOP, tgsi_unsupported},
6267 {TGSI_OPCODE_ARA, 0, ALU_OP0_NOP, tgsi_unsupported},
6268 {TGSI_OPCODE_ARR, 0, ALU_OP0_NOP, tgsi_eg_arl},
6269 {TGSI_OPCODE_BRA, 0, ALU_OP0_NOP, tgsi_unsupported},
6270 {TGSI_OPCODE_CAL, 0, ALU_OP0_NOP, tgsi_unsupported},
6271 {TGSI_OPCODE_RET, 0, ALU_OP0_NOP, tgsi_unsupported},
6272 {TGSI_OPCODE_SSG, 0, ALU_OP0_NOP, tgsi_ssg},
6273 {TGSI_OPCODE_CMP, 0, ALU_OP0_NOP, tgsi_cmp},
6274 {TGSI_OPCODE_SCS, 0, ALU_OP0_NOP, tgsi_scs},
6275 {TGSI_OPCODE_TXB, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
6276 {TGSI_OPCODE_NRM, 0, ALU_OP0_NOP, tgsi_unsupported},
6277 {TGSI_OPCODE_DIV, 0, ALU_OP0_NOP, tgsi_unsupported},
6278 {TGSI_OPCODE_DP2, 0, ALU_OP2_DOT4, tgsi_dp},
6279 {TGSI_OPCODE_TXL, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
/* Control-flow opcodes carry a CF_OP_* instead of an ALU op. */
6280 {TGSI_OPCODE_BRK, 0, CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
6281 {TGSI_OPCODE_IF, 0, ALU_OP0_NOP, tgsi_if},
6282 /* gap */
6283 {75, 0, ALU_OP0_NOP, tgsi_unsupported},
6284 {76, 0, ALU_OP0_NOP, tgsi_unsupported},
6285 {TGSI_OPCODE_ELSE, 0, ALU_OP0_NOP, tgsi_else},
6286 {TGSI_OPCODE_ENDIF, 0, ALU_OP0_NOP, tgsi_endif},
6287 /* gap */
6288 {79, 0, ALU_OP0_NOP, tgsi_unsupported},
6289 {80, 0, ALU_OP0_NOP, tgsi_unsupported},
6290 {TGSI_OPCODE_PUSHA, 0, ALU_OP0_NOP, tgsi_unsupported},
6291 {TGSI_OPCODE_POPA, 0, ALU_OP0_NOP, tgsi_unsupported},
6292 {TGSI_OPCODE_CEIL, 0, ALU_OP1_CEIL, tgsi_op2},
6293 {TGSI_OPCODE_I2F, 0, ALU_OP1_INT_TO_FLT, tgsi_op2},
6294 {TGSI_OPCODE_NOT, 0, ALU_OP1_NOT_INT, tgsi_op2},
6295 {TGSI_OPCODE_TRUNC, 0, ALU_OP1_TRUNC, tgsi_op2},
6296 {TGSI_OPCODE_SHL, 0, ALU_OP2_LSHL_INT, tgsi_op2},
6297 /* gap */
6298 {88, 0, ALU_OP0_NOP, tgsi_unsupported},
6299 {TGSI_OPCODE_AND, 0, ALU_OP2_AND_INT, tgsi_op2},
6300 {TGSI_OPCODE_OR, 0, ALU_OP2_OR_INT, tgsi_op2},
6301 {TGSI_OPCODE_MOD, 0, ALU_OP0_NOP, tgsi_imod},
6302 {TGSI_OPCODE_XOR, 0, ALU_OP2_XOR_INT, tgsi_op2},
6303 {TGSI_OPCODE_SAD, 0, ALU_OP0_NOP, tgsi_unsupported},
6304 {TGSI_OPCODE_TXF, 0, FETCH_OP_LD, tgsi_tex},
6305 {TGSI_OPCODE_TXQ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
6306 {TGSI_OPCODE_CONT, 0, CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
6307 {TGSI_OPCODE_EMIT, 0, ALU_OP0_NOP, tgsi_unsupported},
6308 {TGSI_OPCODE_ENDPRIM, 0, ALU_OP0_NOP, tgsi_unsupported},
6309 {TGSI_OPCODE_BGNLOOP, 0, ALU_OP0_NOP, tgsi_bgnloop},
6310 {TGSI_OPCODE_BGNSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
6311 {TGSI_OPCODE_ENDLOOP, 0, ALU_OP0_NOP, tgsi_endloop},
6312 {TGSI_OPCODE_ENDSUB, 0, ALU_OP0_NOP, tgsi_unsupported},
/* TXQ_LZ shares the TXQ fetch op; tgsi_tex is expected to tell them apart. */
6313 {TGSI_OPCODE_TXQ_LZ, 0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
6314 /* gap */
6315 {104, 0, ALU_OP0_NOP, tgsi_unsupported},
6316 {105, 0, ALU_OP0_NOP, tgsi_unsupported},
6317 {106, 0, ALU_OP0_NOP, tgsi_unsupported},
6318 {TGSI_OPCODE_NOP, 0, ALU_OP0_NOP, tgsi_unsupported},
6319 /* gap */
6320 {108, 0, ALU_OP0_NOP, tgsi_unsupported},
6321 {109, 0, ALU_OP0_NOP, tgsi_unsupported},
6322 {110, 0, ALU_OP0_NOP, tgsi_unsupported},
6323 {111, 0, ALU_OP0_NOP, tgsi_unsupported},
6324 {TGSI_OPCODE_NRM4, 0, ALU_OP0_NOP, tgsi_unsupported},
6325 {TGSI_OPCODE_CALLNZ, 0, ALU_OP0_NOP, tgsi_unsupported},
6326 {TGSI_OPCODE_IFC, 0, ALU_OP0_NOP, tgsi_unsupported},
6327 {TGSI_OPCODE_BREAKC, 0, ALU_OP0_NOP, tgsi_unsupported},
6328 {TGSI_OPCODE_KIL, 0, ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
6329 {TGSI_OPCODE_END, 0, ALU_OP0_NOP, tgsi_end}, /* aka HALT */
6330 /* gap */
6331 {118, 0, ALU_OP0_NOP, tgsi_unsupported},
6332 {TGSI_OPCODE_F2I, 0, ALU_OP1_FLT_TO_INT, tgsi_op2},
6333 {TGSI_OPCODE_IDIV, 0, ALU_OP0_NOP, tgsi_idiv},
6334 {TGSI_OPCODE_IMAX, 0, ALU_OP2_MAX_INT, tgsi_op2},
6335 {TGSI_OPCODE_IMIN, 0, ALU_OP2_MIN_INT, tgsi_op2},
/* INEG is built from SUB_INT; presumably tgsi_ineg emits 0 - src. */
6336 {TGSI_OPCODE_INEG, 0, ALU_OP2_SUB_INT, tgsi_ineg},
6337 {TGSI_OPCODE_ISGE, 0, ALU_OP2_SETGE_INT, tgsi_op2},
6338 {TGSI_OPCODE_ISHR, 0, ALU_OP2_ASHR_INT, tgsi_op2},
6339 {TGSI_OPCODE_ISLT, 0, ALU_OP2_SETGT_INT, tgsi_op2_swap},
6340 {TGSI_OPCODE_F2U, 0, ALU_OP1_FLT_TO_UINT, tgsi_op2},
6341 {TGSI_OPCODE_U2F, 0, ALU_OP1_UINT_TO_FLT, tgsi_op2},
6342 {TGSI_OPCODE_UADD, 0, ALU_OP2_ADD_INT, tgsi_op2},
6343 {TGSI_OPCODE_UDIV, 0, ALU_OP0_NOP, tgsi_udiv},
6344 {TGSI_OPCODE_UMAD, 0, ALU_OP0_NOP, tgsi_umad},
6345 {TGSI_OPCODE_UMAX, 0, ALU_OP2_MAX_UINT, tgsi_op2},
6346 {TGSI_OPCODE_UMIN, 0, ALU_OP2_MIN_UINT, tgsi_op2},
6347 {TGSI_OPCODE_UMOD, 0, ALU_OP0_NOP, tgsi_umod},
/* Integer multiply goes through a Cayman-specific emitter (see the
 * CAYMAN notes at the top of the file about t-slot ops on this chip). */
6348 {TGSI_OPCODE_UMUL, 0, ALU_OP2_MULLO_INT, cayman_mul_int_instr},
6349 {TGSI_OPCODE_USEQ, 0, ALU_OP2_SETE_INT, tgsi_op2},
6350 {TGSI_OPCODE_USGE, 0, ALU_OP2_SETGE_UINT, tgsi_op2},
6351 {TGSI_OPCODE_USHR, 0, ALU_OP2_LSHR_INT, tgsi_op2},
6352 {TGSI_OPCODE_USLT, 0, ALU_OP2_SETGT_UINT, tgsi_op2_swap},
6353 {TGSI_OPCODE_USNE, 0, ALU_OP2_SETNE_INT, tgsi_op2},
6354 {TGSI_OPCODE_SWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
6355 {TGSI_OPCODE_CASE, 0, ALU_OP0_NOP, tgsi_unsupported},
6356 {TGSI_OPCODE_DEFAULT, 0, ALU_OP0_NOP, tgsi_unsupported},
6357 {TGSI_OPCODE_ENDSWITCH, 0, ALU_OP0_NOP, tgsi_unsupported},
/* SM5-style SAMPLE_* / GATHER4 / SVIEWINFO opcodes are not implemented
 * in this backend (handler is tgsi_unsupported, op field left 0). */
6358 {TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
6359 {TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
6360 {TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
6361 {TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
6362 {TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
6363 {TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
6364 {TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
6365 {TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
6366 {TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
6367 {TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
6368 {TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
6369 {TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
6370 {TGSI_OPCODE_UARL, 0, ALU_OP1_MOVA_INT, tgsi_eg_arl},
6371 {TGSI_OPCODE_UCMP, 0, ALU_OP0_NOP, tgsi_ucmp},
6372 {TGSI_OPCODE_IABS, 0, 0, tgsi_iabs},
6373 {TGSI_OPCODE_ISSG, 0, 0, tgsi_issg},
/* Memory / atomic opcodes: all unimplemented on this backend. */
6374 {TGSI_OPCODE_LOAD, 0, ALU_OP0_NOP, tgsi_unsupported},
6375 {TGSI_OPCODE_STORE, 0, ALU_OP0_NOP, tgsi_unsupported},
6376 {TGSI_OPCODE_MFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
6377 {TGSI_OPCODE_LFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
6378 {TGSI_OPCODE_SFENCE, 0, ALU_OP0_NOP, tgsi_unsupported},
6379 {TGSI_OPCODE_BARRIER, 0, ALU_OP0_NOP, tgsi_unsupported},
6380 {TGSI_OPCODE_ATOMUADD, 0, ALU_OP0_NOP, tgsi_unsupported},
6381 {TGSI_OPCODE_ATOMXCHG, 0, ALU_OP0_NOP, tgsi_unsupported},
6382 {TGSI_OPCODE_ATOMCAS, 0, ALU_OP0_NOP, tgsi_unsupported},
6383 {TGSI_OPCODE_ATOMAND, 0, ALU_OP0_NOP, tgsi_unsupported},
6384 {TGSI_OPCODE_ATOMOR, 0, ALU_OP0_NOP, tgsi_unsupported},
6385 {TGSI_OPCODE_ATOMXOR, 0, ALU_OP0_NOP, tgsi_unsupported},
6386 {TGSI_OPCODE_ATOMUMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
6387 {TGSI_OPCODE_ATOMUMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
6388 {TGSI_OPCODE_ATOMIMIN, 0, ALU_OP0_NOP, tgsi_unsupported},
6389 {TGSI_OPCODE_ATOMIMAX, 0, ALU_OP0_NOP, tgsi_unsupported},
/* Two-source texture variants reuse the single-source fetch ops. */
6390 {TGSI_OPCODE_TEX2, 0, FETCH_OP_SAMPLE, tgsi_tex},
6391 {TGSI_OPCODE_TXB2, 0, FETCH_OP_SAMPLE_LB, tgsi_tex},
6392 {TGSI_OPCODE_TXL2, 0, FETCH_OP_SAMPLE_L, tgsi_tex},
6393 {TGSI_OPCODE_LAST, 0, ALU_OP0_NOP, tgsi_unsupported},
6394 };