6233753e71678ca021bd5ba37fc6835d55175f84
[mesa.git] / src / gallium / drivers / r600 / r600_shader.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_sq.h"
24 #include "r600_llvm.h"
25 #include "r600_formats.h"
26 #include "r600_opcodes.h"
27 #include "r600_shader.h"
28 #include "r600d.h"
29
30 #include "sb/sb_public.h"
31
32 #include "pipe/p_shader_tokens.h"
33 #include "tgsi/tgsi_info.h"
34 #include "tgsi/tgsi_parse.h"
35 #include "tgsi/tgsi_scan.h"
36 #include "tgsi/tgsi_dump.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include <stdio.h>
40 #include <errno.h>
41
42 /* CAYMAN notes
43 Why CAYMAN got loops for lots of instructions is explained here.
44
45 -These 8xx t-slot only ops are implemented in all vector slots.
46 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
47 These 8xx t-slot only opcodes become vector ops, with all four
48 slots expecting the arguments on sources a and b. Result is
49 broadcast to all channels.
50 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
51 These 8xx t-slot only opcodes become vector ops in the z, y, and
52 x slots.
53 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
54 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
55 SQRT_IEEE/_64
56 SIN/COS
57 The w slot may have an independent co-issued operation, or if the
58 result is required to be in the w slot, the opcode above may be
59 issued in the w slot as well.
60 The compiler must issue the source argument to slots z, y, and x
61 */
62
63 #define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
64 static int r600_shader_from_tgsi(struct r600_context *rctx,
65 struct r600_pipe_shader *pipeshader,
66 union r600_shader_key key);
67
68
69 static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
70 int size, unsigned comp_mask) {
71
72 if (!size)
73 return;
74
75 if (ps->num_arrays == ps->max_arrays) {
76 ps->max_arrays += 64;
77 ps->arrays = realloc(ps->arrays, ps->max_arrays *
78 sizeof(struct r600_shader_array));
79 }
80
81 int n = ps->num_arrays;
82 ++ps->num_arrays;
83
84 ps->arrays[n].comp_mask = comp_mask;
85 ps->arrays[n].gpr_start = start_gpr;
86 ps->arrays[n].gpr_count = size;
87 }
88
89 static void r600_dump_streamout(struct pipe_stream_output_info *so)
90 {
91 unsigned i;
92
93 fprintf(stderr, "STREAMOUT\n");
94 for (i = 0; i < so->num_outputs; i++) {
95 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
96 so->output[i].start_component;
97 fprintf(stderr, " %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
98 i,
99 so->output[i].stream,
100 so->output[i].output_buffer,
101 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
102 so->output[i].register_index,
103 mask & 1 ? "x" : "",
104 mask & 2 ? "y" : "",
105 mask & 4 ? "z" : "",
106 mask & 8 ? "w" : "",
107 so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
108 }
109 }
110
111 static int store_shader(struct pipe_context *ctx,
112 struct r600_pipe_shader *shader)
113 {
114 struct r600_context *rctx = (struct r600_context *)ctx;
115 uint32_t *ptr, i;
116
117 if (shader->bo == NULL) {
118 shader->bo = (struct r600_resource*)
119 pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
120 if (shader->bo == NULL) {
121 return -ENOMEM;
122 }
123 ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
124 if (R600_BIG_ENDIAN) {
125 for (i = 0; i < shader->shader.bc.ndw; ++i) {
126 ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
127 }
128 } else {
129 memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
130 }
131 rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
132 }
133
134 return 0;
135 }
136
/* Translate the selected TGSI shader into r600 bytecode, optionally run
 * the sb optimizer / disassembler, upload the bytecode into a buffer and
 * build the chip-specific hw state for the shader stage.
 * Returns 0 on success or a negative errno; on failure the partially
 * built shader is torn down via r600_pipe_shader_destroy(). */
int r600_pipe_shader_create(struct pipe_context *ctx,
			    struct r600_pipe_shader *shader,
			    union r600_shader_key key)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = shader->selector;
	int r;
	bool dump = r600_can_dump_shader(&rctx->screen->b, sel->tokens);
	unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
	unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
	unsigned export_shader;

	shader->shader.bc.isa = rctx->isa;

	if (dump) {
		/* dump the incoming TGSI and any stream-output config */
		fprintf(stderr, "--------------------------------------------------------------\n");
		tgsi_dump(sel->tokens, 0);

		if (sel->so.num_outputs) {
			r600_dump_streamout(&sel->so);
		}
	}
	r = r600_shader_from_tgsi(rctx, shader, key);
	if (r) {
		R600_ERR("translation from TGSI failed !\n");
		goto error;
	}

	/* disable SB for shaders using doubles */
	use_sb &= !shader->shader.uses_doubles;

	/* Check if the bytecode has already been built. When using the llvm
	 * backend, r600_shader_from_tgsi() will take care of building the
	 * bytecode.
	 */
	if (!shader->shader.bc.bytecode) {
		r = r600_bytecode_build(&shader->shader.bc);
		if (r) {
			R600_ERR("building bytecode failed !\n");
			goto error;
		}
	}

	if (dump && !sb_disasm) {
		/* plain disassembly without the sb backend */
		fprintf(stderr, "--------------------------------------------------------------\n");
		r600_bytecode_disasm(&shader->shader.bc);
		fprintf(stderr, "______________________________________________________________\n");
	} else if ((dump && sb_disasm) || use_sb) {
		/* sb path: optimize and/or disassemble the bytecode */
		r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
					     dump, use_sb);
		if (r) {
			R600_ERR("r600_sb_bytecode_process failed !\n");
			goto error;
		}
	}

	if (shader->gs_copy_shader) {
		if (dump) {
			// dump copy shader
			r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
						     &shader->gs_copy_shader->shader, dump, 0);
			if (r)
				goto error;
		}

		if ((r = store_shader(ctx, shader->gs_copy_shader)))
			goto error;
	}

	/* Store the shader in a buffer. */
	if ((r = store_shader(ctx, shader)))
		goto error;

	/* Build state. */
	switch (shader->shader.processor_type) {
	case TGSI_PROCESSOR_GEOMETRY:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_gs_state(ctx, shader);
			evergreen_update_vs_state(ctx, shader->gs_copy_shader);
		} else {
			r600_update_gs_state(ctx, shader);
			r600_update_vs_state(ctx, shader->gs_copy_shader);
		}
		break;
	case TGSI_PROCESSOR_VERTEX:
		/* a VS compiled "as ES" feeds the GS ring instead of the
		 * fixed-function pipeline */
		export_shader = key.vs.as_es;
		if (rctx->b.chip_class >= EVERGREEN) {
			if (export_shader)
				evergreen_update_es_state(ctx, shader);
			else
				evergreen_update_vs_state(ctx, shader);
		} else {
			if (export_shader)
				r600_update_es_state(ctx, shader);
			else
				r600_update_vs_state(ctx, shader);
		}
		break;
	case TGSI_PROCESSOR_FRAGMENT:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_ps_state(ctx, shader);
		} else {
			r600_update_ps_state(ctx, shader);
		}
		break;
	default:
		r = -EINVAL;
		goto error;
	}
	return 0;

error:
	r600_pipe_shader_destroy(ctx, shader);
	return r;
}
252
/* Release everything owned by a pipe shader: the uploaded bytecode
 * buffer, the built bytecode and the start-up command buffer. */
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
	r600_bytecode_clear(&shader->shader.bc);
	r600_release_command_buffer(&shader->command_buffer);
}
259
260 /*
261 * tgsi -> r600 shader
262 */
263 struct r600_shader_tgsi_instruction;
264
/* Decoded TGSI source operand, ready to be copied into ALU src fields. */
struct r600_shader_src {
	unsigned				sel;        /* register / special-value selector */
	unsigned				swizzle[4]; /* per-channel swizzle */
	unsigned				neg;        /* negate modifier */
	unsigned				abs;        /* absolute-value modifier */
	unsigned				rel;        /* relative (indirect) addressing */
	unsigned				kc_bank;    /* constant-cache bank */
	boolean					kc_rel;     /* true if cache bank is indexed */
	uint32_t				value[4];   /* literal values when sel is V_SQ_ALU_SRC_LITERAL */
};
275
/* Tracking for one evergreen barycentric interpolator slot. */
struct eg_interp {
	boolean					enabled;  /* interpolator is required by the shader */
	unsigned				ij_index; /* assigned i/j pair index (see evergreen_gpr_count) */
};
280
/* All state carried while translating one TGSI shader to r600 bytecode. */
struct r600_shader_ctx {
	struct tgsi_shader_info			info;
	struct tgsi_parse_context		parse;
	const struct tgsi_token			*tokens;
	unsigned				type;     /* TGSI_PROCESSOR_* of the shader being built */
	unsigned				file_offset[TGSI_FILE_COUNT]; /* first GPR per TGSI register file */
	unsigned				temp_reg; /* first GPR reserved for driver temps (see r600_get_temp) */
	const struct r600_shader_tgsi_instruction	*inst_info; /* opcode entry for the current instruction */
	struct r600_bytecode			*bc;
	struct r600_shader			*shader;
	struct r600_shader_src			src[4];   /* decoded sources of the current instruction */
	uint32_t				*literals; /* accumulated literal pool */
	uint32_t				nliterals;
	uint32_t				max_driver_temp_used;
	boolean use_llvm;
	/* needed for evergreen interpolation */
	struct eg_interp		eg_interpolators[6]; // indexed by Persp/Linear * 3 + sample/center/centroid
	/* evergreen/cayman also store sample mask in face register */
	int face_gpr;
	/* sample id is .w component stored in fixed point position register */
	int fixed_pt_position_gpr;
	int colors_used;          /* count of COLOR-semantic fragment inputs */
	boolean clip_vertex_write; /* shader writes TGSI_SEMANTIC_CLIPVERTEX */
	unsigned cv_output;       /* output index of the CLIPVERTEX output */
	unsigned edgeflag_output; /* output index of the EDGEFLAG output */
	int fragcoord_input;      /* input index of TGSI_SEMANTIC_POSITION */
	int native_integers;
	int next_ring_offset;     /* running GS input ring offset, 16 bytes per input */
	int gs_out_ring_offset;   /* running GS output ring offset, 16 bytes per output */
	int gs_next_vertex;
	struct r600_shader	*gs_for_vs; /* presumably the GS a VS-as-ES feeds — confirm against callers */
	int gs_export_gpr_tregs[4];
	const struct pipe_stream_output_info	*gs_stream_output_info;
	unsigned				enabled_stream_buffers_mask;
};
316
/* Maps one TGSI opcode to its r600 op and translation callback. */
struct r600_shader_tgsi_instruction {
	unsigned	op;
	int (*process)(struct r600_shader_ctx *ctx);
};
321
322 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
323 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
324 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
325 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
326 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
327 static int tgsi_else(struct r600_shader_ctx *ctx);
328 static int tgsi_endif(struct r600_shader_ctx *ctx);
329 static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
330 static int tgsi_endloop(struct r600_shader_ctx *ctx);
331 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
332 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
333 unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
334 unsigned int dst_reg);
335 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
336 const struct r600_shader_src *shader_src,
337 unsigned chan);
338
/* Reject TGSI instructions this backend cannot translate: multiple dst
 * registers (except DFRACEXP), predication, and unsupported 2D-indexed
 * operands.  Returns 0 if supported, -EINVAL otherwise. */
static int tgsi_is_supported(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
	int j;

	if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
		R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
		return -EINVAL;
	}
	if (i->Instruction.Predicate) {
		R600_ERR("predicate unsupported\n");
		return -EINVAL;
	}
#if 0
	if (i->Instruction.Label) {
		R600_ERR("label unsupported\n");
		return -EINVAL;
	}
#endif
	for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
		if (i->Src[j].Register.Dimension) {
			switch (i->Src[j].Register.File) {
			case TGSI_FILE_CONSTANT:
				break;
			case TGSI_FILE_INPUT:
				if (ctx->type == TGSI_PROCESSOR_GEOMETRY)
					break;
				/* fall through: 2D inputs only allowed in GS */
			default:
				R600_ERR("unsupported src %d (dimension %d)\n", j,
					 i->Src[j].Register.Dimension);
				return -EINVAL;
			}
		}
	}
	for (j = 0; j < i->Instruction.NumDstRegs; j++) {
		if (i->Dst[j].Register.Dimension) {
			R600_ERR("unsupported dst (dimension)\n");
			return -EINVAL;
		}
	}
	return 0;
}
381
382 int eg_get_interpolator_index(unsigned interpolate, unsigned location)
383 {
384 if (interpolate == TGSI_INTERPOLATE_COLOR ||
385 interpolate == TGSI_INTERPOLATE_LINEAR ||
386 interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
387 {
388 int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
389 int loc;
390
391 switch(location) {
392 case TGSI_INTERPOLATE_LOC_CENTER:
393 loc = 1;
394 break;
395 case TGSI_INTERPOLATE_LOC_CENTROID:
396 loc = 2;
397 break;
398 case TGSI_INTERPOLATE_LOC_SAMPLE:
399 default:
400 loc = 0; break;
401 }
402
403 return is_linear * 3 + loc;
404 }
405
406 return -1;
407 }
408
/* Copy the ij pair assigned to this input's (interpolate, location)
 * combination (computed in evergreen_gpr_count) onto the input itself. */
static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
					     int input)
{
	int i = eg_get_interpolator_index(
		ctx->shader->input[input].interpolate,
		ctx->shader->input[input].interpolate_location);
	assert(i >= 0);
	ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
}
418
/* Emit the eight INTERP_ZW/INTERP_XY ALU slots that interpolate one
 * fragment input with its assigned barycentric i/j pair; the interpolated
 * result lands in the input's GPR. */
static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
{
	int i, r;
	struct r600_bytecode_alu alu;
	int gpr = 0, base_chan = 0;
	int ij_index = ctx->shader->input[input].ij_index;

	/* work out gpr and base_chan from index */
	gpr = ij_index / 2;
	base_chan = (2 * (ij_index % 2)) + 1;

	for (i = 0; i < 8; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		/* first group of four computes ZW, second group XY */
		if (i < 4)
			alu.op = ALU_OP2_INTERP_ZW;
		else
			alu.op = ALU_OP2_INTERP_XY;

		/* only slots 2..5 actually write a result channel */
		if ((i > 1) && (i < 6)) {
			alu.dst.sel = ctx->shader->input[input].gpr;
			alu.dst.write = 1;
		}

		alu.dst.chan = i % 4;

		/* alternate between the two barycentric channels */
		alu.src[0].sel = gpr;
		alu.src[0].chan = (base_chan - (i % 2));

		alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;

		alu.bank_swizzle_force = SQ_ALU_VEC_210;
		if ((i % 4) == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
459
460 static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
461 {
462 int i, r;
463 struct r600_bytecode_alu alu;
464
465 for (i = 0; i < 4; i++) {
466 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
467
468 alu.op = ALU_OP1_INTERP_LOAD_P0;
469
470 alu.dst.sel = ctx->shader->input[input].gpr;
471 alu.dst.write = 1;
472
473 alu.dst.chan = i;
474
475 alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
476 alu.src[0].chan = i;
477
478 if (i == 3)
479 alu.last = 1;
480 r = r600_bytecode_add_alu(ctx->bc, &alu);
481 if (r)
482 return r;
483 }
484 return 0;
485 }
486
487 /*
488 * Special export handling in shaders
489 *
490 * shader export ARRAY_BASE for EXPORT_POS:
491 * 60 is position
492 * 61 is misc vector
493 * 62, 63 are clip distance vectors
494 *
495 * The use of the values exported in 61-63 are controlled by PA_CL_VS_OUT_CNTL:
496 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
497 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
498 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
499 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
500 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
501 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
502 * exclusive from render target index)
503 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
504 *
505 *
506 * shader export ARRAY_BASE for EXPORT_PIXEL:
507 * 0-7 CB targets
508 * 61 computed Z vector
509 *
510 * The use of the values exported in the computed Z vector are controlled
511 * by DB_SHADER_CONTROL:
512 * Z_EXPORT_ENABLE - Z as a float in RED
513 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
514 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
515 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
516 * DB_SOURCE_FORMAT - export control restrictions
517 *
518 */
519
520
521 /* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
522 static int r600_spi_sid(struct r600_shader_io * io)
523 {
524 int index, name = io->name;
525
526 /* These params are handled differently, they don't need
527 * semantic indices, so we'll use 0 for them.
528 */
529 if (name == TGSI_SEMANTIC_POSITION ||
530 name == TGSI_SEMANTIC_PSIZE ||
531 name == TGSI_SEMANTIC_EDGEFLAG ||
532 name == TGSI_SEMANTIC_FACE ||
533 name == TGSI_SEMANTIC_SAMPLEMASK)
534 index = 0;
535 else {
536 if (name == TGSI_SEMANTIC_GENERIC) {
537 /* For generic params simply use sid from tgsi */
538 index = io->sid;
539 } else {
540 /* For non-generic params - pack name and sid into 8 bits */
541 index = 0x80 | (name<<3) | (io->sid);
542 }
543
544 /* Make sure that all really used indices have nonzero value, so
545 * we can just compare it to 0 later instead of comparing the name
546 * with different values to detect special cases. */
547 index++;
548 }
549
550 return index;
551 };
552
553 /* turn input into interpolate on EG */
/* turn input into interpolate on EG: assign the input an LDS parameter
 * slot and emit either barycentric interpolation or a flat P0 load,
 * depending on its interpolate mode (skipped when llvm emits the code). */
static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
{
	int r = 0;

	if (ctx->shader->input[index].spi_sid) {
		ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
		if (ctx->shader->input[index].interpolate > 0) {
			/* interpolated input: needs an ij pair */
			evergreen_interp_assign_ij_index(ctx, index);
			if (!ctx->use_llvm)
				r = evergreen_interp_alu(ctx, index);
		} else {
			/* constant/flat input */
			if (!ctx->use_llvm)
				r = evergreen_interp_flat(ctx, index);
		}
	}
	return r;
}
571
/* Two-sided lighting: emit CNDGT selects so the front color GPR ends up
 * holding either the front or the back color, chosen per channel by the
 * sign of the face register. */
static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
{
	struct r600_bytecode_alu alu;
	int i, r;
	int gpr_front = ctx->shader->input[front].gpr;
	int gpr_back = ctx->shader->input[back].gpr;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP3_CNDGT;
		alu.is_op3 = 1;
		alu.dst.write = 1;
		alu.dst.sel = gpr_front;
		alu.src[0].sel = ctx->face_gpr;   /* condition */
		alu.src[1].sel = gpr_front;       /* taken when face > 0 */
		alu.src[2].sel = gpr_back;        /* taken otherwise */

		alu.dst.chan = i;
		alu.src[1].chan = i;
		alu.src[2].chan = i;
		alu.last = (i==3);

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;
	}

	return 0;
}
600
601 static inline int get_address_file_reg(struct r600_shader_ctx *ctx, int index)
602 {
603 return index > 0 ? ctx->bc->index_reg[index - 1] : ctx->bc->ar_reg;
604 }
605
606 static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
607 {
608 int i;
609 i = ctx->shader->noutput++;
610 ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
611 ctx->shader->output[i].sid = 0;
612 ctx->shader->output[i].gpr = 0;
613 ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
614 ctx->shader->output[i].write_mask = 0x4;
615 ctx->shader->output[i].spi_sid = prim_id_sid;
616
617 return 0;
618 }
619
/* Handle one TGSI declaration token: record inputs/outputs (with their
 * GPRs and SPI semantic ids), indirect temporary arrays, and system
 * values, and kick off evergreen input interpolation where needed.
 * Returns 0 on success or -EINVAL for unsupported declarations. */
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	int r, i, j, count = d->Range.Last - d->Range.First + 1;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->ninput + j;
			assert(i < Elements(ctx->shader->input));
			ctx->shader->input[i].name = d->Semantic.Name;
			ctx->shader->input[i].sid = d->Semantic.Index + j;
			ctx->shader->input[i].interpolate = d->Interp.Interpolate;
			ctx->shader->input[i].interpolate_location = d->Interp.Location;
			ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
			if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
				switch (ctx->shader->input[i].name) {
				case TGSI_SEMANTIC_FACE:
					if (ctx->face_gpr != -1)
						ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
					else
						ctx->face_gpr = ctx->shader->input[i].gpr;
					break;
				case TGSI_SEMANTIC_COLOR:
					ctx->colors_used++;
					break;
				case TGSI_SEMANTIC_POSITION:
					ctx->fragcoord_input = i;
					break;
				case TGSI_SEMANTIC_PRIMID:
					/* set this for now */
					ctx->shader->gs_prim_id_input = true;
					ctx->shader->ps_prim_id_input = i;
					break;
				}
				if (ctx->bc->chip_class >= EVERGREEN) {
					if ((r = evergreen_interp_input(ctx, i)))
						return r;
				}
			} else if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
				/* FIXME probably skip inputs if they aren't passed in the ring */
				ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
				ctx->next_ring_offset += 16;
				if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
					ctx->shader->gs_prim_id_input = true;
			}
		}
		ctx->shader->ninput += count;
		break;
	case TGSI_FILE_OUTPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->noutput + j;
			assert(i < Elements(ctx->shader->output));
			ctx->shader->output[i].name = d->Semantic.Name;
			ctx->shader->output[i].sid = d->Semantic.Index + j;
			ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
			ctx->shader->output[i].interpolate = d->Interp.Interpolate;
			ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
			if (ctx->type == TGSI_PROCESSOR_VERTEX ||
			    ctx->type == TGSI_PROCESSOR_GEOMETRY) {
				ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
				/* record which misc/clip exports this shader will need */
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_CLIPDIST:
					ctx->shader->clip_dist_write |= d->Declaration.UsageMask <<
									((d->Semantic.Index + j) << 2);
					break;
				case TGSI_SEMANTIC_PSIZE:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_point_size = 1;
					break;
				case TGSI_SEMANTIC_EDGEFLAG:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_edgeflag = 1;
					ctx->edgeflag_output = i;
					break;
				case TGSI_SEMANTIC_VIEWPORT_INDEX:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_viewport = 1;
					break;
				case TGSI_SEMANTIC_LAYER:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_layer = 1;
					break;
				case TGSI_SEMANTIC_CLIPVERTEX:
					ctx->clip_vertex_write = TRUE;
					ctx->cv_output = i;
					break;
				}
				if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
					ctx->gs_out_ring_offset += 16;
				}
			} else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_COLOR:
					ctx->shader->nr_ps_max_color_exports++;
					break;
				}
			}
		}
		ctx->shader->noutput += count;
		break;
	case TGSI_FILE_TEMPORARY:
		/* only indirectly addressed temp ranges need array records */
		if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
			if (d->Array.ArrayID) {
				r600_add_gpr_array(ctx->shader,
						   ctx->file_offset[TGSI_FILE_TEMPORARY] +
						   d->Range.First,
						   d->Range.Last - d->Range.First + 1, 0x0F);
			}
		}
		break;

	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_SAMPLER:
	case TGSI_FILE_SAMPLER_VIEW:
	case TGSI_FILE_ADDRESS:
		break;

	case TGSI_FILE_SYSTEM_VALUE:
		if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
			d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
			d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
			break; /* Already handled from allocate_system_value_inputs */
		} else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
			/* without native integers, convert the raw instance id
			 * (GPR0.w) to float in place */
			if (!ctx->native_integers) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));

				alu.op = ALU_OP1_INT_TO_FLT;
				alu.src[0].sel = 0;
				alu.src[0].chan = 3;

				alu.dst.sel = 0;
				alu.dst.chan = 3;
				alu.dst.write = 1;
				alu.last = 1;

				if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
					return r;
			}
			break;
		} else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
			break;
		else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
			break;
		/* fall through: any other system value is unsupported */
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}
772
773 static int r600_get_temp(struct r600_shader_ctx *ctx)
774 {
775 return ctx->temp_reg + ctx->max_driver_temp_used++;
776 }
777
/* Scan the TGSI tokens for system values (SAMPLEMASK and SAMPLEID /
 * SAMPLEPOS) and for interpolateAtSample usage, then reserve one input
 * GPR for each value actually needed, starting at gpr_offset.  The
 * allocated GPR numbers are stored through ctx->face_gpr and
 * ctx->fixed_pt_position_gpr.  Returns the first GPR after those
 * allocated here. */
static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
{
	struct tgsi_parse_context parse;
	struct {
		boolean enabled;
		int *reg;
		unsigned name, alternate_name;
	} inputs[2] = {
		{ false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */

		{ false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
	};
	int i, k, num_regs = 0;

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
				inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
				inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					inputs[1].enabled = true; /* needs SAMPLEID */
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					/* Needs sample positions, currently those are always available */
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				/* mark the interpolator this instruction relies on */
				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				ctx->eg_interpolators[k].enabled = true;
			}
		} else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
			struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
			if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
				for (k = 0; k < Elements(inputs); k++) {
					if (d->Semantic.Name == inputs[k].name ||
						d->Semantic.Name == inputs[k].alternate_name) {
						inputs[k].enabled = true;
					}
				}
			}
		}
	}

	tgsi_parse_free(&parse);

	/* allocate a GPR and a shader input for every enabled entry */
	for (i = 0; i < Elements(inputs); i++) {
		boolean enabled = inputs[i].enabled;
		int *reg = inputs[i].reg;
		unsigned name = inputs[i].name;

		if (enabled) {
			int gpr = gpr_offset + num_regs++;

			// add to inputs, allocate a gpr
			k = ctx->shader->ninput ++;
			ctx->shader->input[k].name = name;
			ctx->shader->input[k].sid = 0;
			ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
			ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
			*reg = ctx->shader->input[k].gpr = gpr;
		}
	}

	return gpr_offset + num_regs;
}
857
858 /*
859 * for evergreen we need to scan the shader to find the number of GPRs we need to
860 * reserve for interpolation and system values
861 *
862 * we need to know if we are going to emit
863 * any sample or centroid inputs
864 * if perspective and linear are required
865 */
/* Determine how many GPRs must be reserved at the start of the fragment
 * shader for barycentric interpolants and system values: mark every
 * interpolator the inputs and interpolateAt* instructions need, assign
 * each an ij pair index, then hand the remaining allocation over to
 * allocate_system_value_inputs().  Returns the total reserved GPR count. */
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
	int i;
	int num_baryc;
	struct tgsi_parse_context parse;

	memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));

	for (i = 0; i < ctx->info.num_inputs; i++) {
		int k;
		/* skip position/face/mask/sampleid */
		if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
			continue;

		k = eg_get_interpolator_index(
			ctx->info.input_interpolate[i],
			ctx->info.input_interpolate_loc[i]);
		if (k >= 0)
			ctx->eg_interpolators[k].enabled = TRUE;
	}

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
				inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
				inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				ctx->eg_interpolators[k].enabled = true;
			}
		}
	}

	tgsi_parse_free(&parse);

	/* assign gpr to each interpolator according to priority */
	num_baryc = 0;
	for (i = 0; i < Elements(ctx->eg_interpolators); i++) {
		if (ctx->eg_interpolators[i].enabled) {
			ctx->eg_interpolators[i].ij_index = num_baryc;
			num_baryc ++;
		}
	}

	/* XXX PULL MODEL and LINE STIPPLE */

	/* two ij pairs fit per GPR */
	num_baryc = (num_baryc + 1) >> 1;
	return allocate_system_value_inputs(ctx, num_baryc);
}
937
938 /* sample_id_sel == NULL means fetch for current sample */
/* Fetch a sample position from the buffer-info constant buffer into a
 * freshly allocated temp GPR and return that GPR number (or a negative
 * error).  sample_id == NULL means the current sample: the id is read
 * from fixed_pt_position_gpr.w; otherwise the id is first copied from
 * sample_id's chan_sel channel into the temp. */
static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
{
	struct r600_bytecode_vtx vtx;
	int r, t1;

	assert(ctx->fixed_pt_position_gpr != -1);

	t1 = r600_get_temp(ctx);

	memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
	vtx.op = FETCH_OP_VFETCH;
	vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	if (sample_id == NULL) {
		vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
		vtx.src_sel_x = 3;
	}
	else {
		struct r600_bytecode_alu alu;

		/* copy the requested sample id channel into the temp so it
		 * can be used as the fetch index */
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
		alu.dst.sel = t1;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		vtx.src_gpr = t1;
		vtx.src_sel_x = 0;
	}
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = t1;
	vtx.dst_sel_x = 0;
	vtx.dst_sel_y = 1;
	vtx.dst_sel_z = 2;
	vtx.dst_sel_w = 3;
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2;
	vtx.format_comp_all = 1;
	vtx.use_const_fields = 0;
	vtx.offset = 1; // first element is size of buffer
	vtx.endian = r600_endian_swap(32);
	vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */

	r = r600_bytecode_add_vtx(ctx->bc, &vtx);
	if (r)
		return r;

	return t1;
}
992
/* Translate a TGSI source operand into the driver's r600_shader_src
 * representation: swizzles, negate/absolute modifiers, and the register
 * selector, with special handling for immediates, system values and
 * constant-buffer operands. */
static void tgsi_src(struct r600_shader_ctx *ctx,
		     const struct tgsi_full_src_register *tgsi_src,
		     struct r600_shader_src *r600_src)
{
	memset(r600_src, 0, sizeof(*r600_src));
	r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
	r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
	r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
	r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
	r600_src->neg = tgsi_src->Register.Negate;
	r600_src->abs = tgsi_src->Register.Absolute;

	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		int index;
		/* If all four channels replicate the same immediate channel,
		 * try to substitute an inline hardware constant for the
		 * literal; if that succeeds (sel is no longer LITERAL) we
		 * are done. */
		if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
			(tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
			(tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {

			index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
			r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg, r600_src->abs);
			if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
				return;
		}
		/* otherwise keep the raw literal values */
		index = tgsi_src->Register.Index;
		r600_src->sel = V_SQ_ALU_SRC_LITERAL;
		memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
	} else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
		if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
			/* sample mask lives in .z of the face GPR */
			r600_src->swizzle[0] = 2; // Z value
			r600_src->swizzle[1] = 2;
			r600_src->swizzle[2] = 2;
			r600_src->swizzle[3] = 2;
			r600_src->sel = ctx->face_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
			/* sample id lives in .w of the fixed-point position GPR */
			r600_src->swizzle[0] = 3; // W value
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = ctx->fixed_pt_position_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
			/* fetch the current sample's position; swizzle .zw to
			 * inline 0 (sel 4) since only .xy are meaningful */
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 1;
			r600_src->swizzle[2] = 4;
			r600_src->swizzle[3] = 4;
			r600_src->sel = load_sample_position(ctx, NULL, -1);
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
			/* instance id is preloaded in R0.w for vertex shaders */
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
			/* vertex id is preloaded in R0.x */
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 0;
			r600_src->swizzle[2] = 0;
			r600_src->swizzle[3] = 0;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
			/* GS invocation id is preloaded in R1.w */
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 1;
		}
	} else {
		/* regular register file: apply the per-file GPR base offset */
		if (tgsi_src->Register.Indirect)
			r600_src->rel = V_SQ_REL_RELATIVE;
		r600_src->sel = tgsi_src->Register.Index;
		r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	}
	if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
		/* 2D constants: Dimension selects the kcache bank, possibly
		 * itself indirectly addressed */
		if (tgsi_src->Register.Dimension) {
			r600_src->kc_bank = tgsi_src->Dimension.Index;
			if (tgsi_src->Dimension.Indirect) {
				r600_src->kc_rel = 1;
			}
		}
	}
}
1072
/* Load a relatively-addressed constant through a vertex fetch.
 * The fetch index comes from the AR register (channel ar_chan),
 * optionally biased by a compile-time offset; the four constant
 * components land in dst_reg.xyzw.  cb_idx selects the constant
 * buffer, cb_rel enables buffer-index-relative addressing.
 * Returns 0 on success or the bytecode emitter's error code. */
static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
				unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
				unsigned int dst_reg)
{
	struct r600_bytecode_vtx vtx;
	unsigned int ar_reg;
	int r;

	if (offset) {
		/* index = AR + offset, computed into dst_reg (reused as a
		 * scratch register before the fetch overwrites it) */
		struct r600_bytecode_alu alu;

		memset(&alu, 0, sizeof(alu));

		alu.op = ALU_OP2_ADD_INT;
		alu.src[0].sel = ctx->bc->ar_reg;
		alu.src[0].chan = ar_chan;

		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = offset;

		alu.dst.sel = dst_reg;
		alu.dst.chan = ar_chan;
		alu.dst.write = 1;
		alu.last = 1;

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;

		ar_reg = dst_reg;
	} else {
		/* no offset: index directly with the AR register */
		ar_reg = ctx->bc->ar_reg;
	}

	memset(&vtx, 0, sizeof(vtx));
	vtx.buffer_id = cb_idx;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	vtx.src_gpr = ar_reg;
	vtx.src_sel_x = ar_chan;
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = dst_reg;
	vtx.dst_sel_x = 0;		/* SEL_X */
	vtx.dst_sel_y = 1;		/* SEL_Y */
	vtx.dst_sel_z = 2;		/* SEL_Z */
	vtx.dst_sel_w = 3;		/* SEL_W */
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2;		/* NUM_FORMAT_SCALED */
	vtx.format_comp_all = 1;	/* FORMAT_COMP_SIGNED */
	vtx.endian = r600_endian_swap(32);
	vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;

	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
		return r;

	return 0;
}
1128
1129 static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
1130 {
1131 struct r600_bytecode_vtx vtx;
1132 int r;
1133 unsigned index = src->Register.Index;
1134 unsigned vtx_id = src->Dimension.Index;
1135 int offset_reg = vtx_id / 3;
1136 int offset_chan = vtx_id % 3;
1137
1138 /* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
1139 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */
1140
1141 if (offset_reg == 0 && offset_chan == 2)
1142 offset_chan = 3;
1143
1144 if (src->Dimension.Indirect) {
1145 int treg[3];
1146 int t2;
1147 struct r600_bytecode_alu alu;
1148 int r, i;
1149
1150 /* you have got to be shitting me -
1151 we have to put the R0.x/y/w into Rt.x Rt+1.x Rt+2.x then index reg from Rt.
1152 at least this is what fglrx seems to do. */
1153 for (i = 0; i < 3; i++) {
1154 treg[i] = r600_get_temp(ctx);
1155 }
1156 r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);
1157
1158 t2 = r600_get_temp(ctx);
1159 for (i = 0; i < 3; i++) {
1160 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1161 alu.op = ALU_OP1_MOV;
1162 alu.src[0].sel = 0;
1163 alu.src[0].chan = i == 2 ? 3 : i;
1164 alu.dst.sel = treg[i];
1165 alu.dst.chan = 0;
1166 alu.dst.write = 1;
1167 alu.last = 1;
1168 r = r600_bytecode_add_alu(ctx->bc, &alu);
1169 if (r)
1170 return r;
1171 }
1172 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1173 alu.op = ALU_OP1_MOV;
1174 alu.src[0].sel = treg[0];
1175 alu.src[0].rel = 1;
1176 alu.dst.sel = t2;
1177 alu.dst.write = 1;
1178 alu.last = 1;
1179 r = r600_bytecode_add_alu(ctx->bc, &alu);
1180 if (r)
1181 return r;
1182 offset_reg = t2;
1183 }
1184
1185
1186 memset(&vtx, 0, sizeof(vtx));
1187 vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
1188 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
1189 vtx.src_gpr = offset_reg;
1190 vtx.src_sel_x = offset_chan;
1191 vtx.offset = index * 16; /*bytes*/
1192 vtx.mega_fetch_count = 16;
1193 vtx.dst_gpr = dst_reg;
1194 vtx.dst_sel_x = 0; /* SEL_X */
1195 vtx.dst_sel_y = 1; /* SEL_Y */
1196 vtx.dst_sel_z = 2; /* SEL_Z */
1197 vtx.dst_sel_w = 3; /* SEL_W */
1198 if (ctx->bc->chip_class >= EVERGREEN) {
1199 vtx.use_const_fields = 1;
1200 } else {
1201 vtx.data_format = FMT_32_32_32_32_FLOAT;
1202 }
1203
1204 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
1205 return r;
1206
1207 return 0;
1208 }
1209
1210 static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
1211 {
1212 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
1213 int i;
1214
1215 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
1216 struct tgsi_full_src_register *src = &inst->Src[i];
1217
1218 if (src->Register.File == TGSI_FILE_INPUT) {
1219 if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
1220 /* primitive id is in R0.z */
1221 ctx->src[i].sel = 0;
1222 ctx->src[i].swizzle[0] = 2;
1223 }
1224 }
1225 if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
1226 int treg = r600_get_temp(ctx);
1227
1228 fetch_gs_input(ctx, src, treg);
1229 ctx->src[i].sel = treg;
1230 }
1231 }
1232 return 0;
1233 }
1234
/* Reduce the number of distinct constant-file operands in the current
 * instruction.  All source operands are first translated via tgsi_src;
 * then all but one constant operand (j counts down and stops at 0) are
 * copied into temp GPRs, and every relatively-addressed constant is
 * fetched into a temp via tgsi_fetch_rel_const.  This keeps the
 * constant usage within what a single ALU group can address. */
static int tgsi_split_constant(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, k, nconst, r;

	/* translate all sources and count the constant-file ones */
	for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
			nconst++;
		}
		tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
	}
	for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
			continue;
		}

		if (ctx->src[i].rel) {
			/* indirect constant: fetch it into a temp and clear
			 * every constant-addressing field of the operand
			 * (sel - 512 undoes the TGSI_FILE_CONSTANT offset) */
			int chan = inst->Src[i].Indirect.Swizzle;
			int treg = r600_get_temp(ctx);
			if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
				return r;

			ctx->src[i].kc_bank = 0;
			ctx->src[i].kc_rel = 0;
			ctx->src[i].sel = treg;
			ctx->src[i].rel = 0;
			j--;
		} else if (j > 0) {
			/* direct constant: MOV all four channels to a temp;
			 * the last remaining constant (j == 0) stays in place */
			int treg = r600_get_temp(ctx);
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = ctx->src[i].sel;
				alu.src[0].chan = k;
				alu.src[0].rel = ctx->src[i].rel;
				alu.src[0].kc_bank = ctx->src[i].kc_bank;
				alu.src[0].kc_rel = ctx->src[i].kc_rel;
				alu.dst.sel = treg;
				alu.dst.chan = k;
				alu.dst.write = 1;
				if (k == 3)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			ctx->src[i].sel = treg;
			ctx->src[i].rel =0;
			j--;
		}
	}
	return 0;
}
1289
/* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
/* Copies all but one literal-constant operand of the current
 * instruction into temp GPRs (j counts down and stops at 0), freeing
 * the instruction's literal slots for internal use by the emitters. */
static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, k, nliteral, r;

	/* count literal operands (tgsi_src has already run on ctx->src) */
	for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			nliteral++;
		}
	}
	for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			/* MOV all four literal channels into a temp; the last
			 * remaining literal (j == 0) stays inline */
			int treg = r600_get_temp(ctx);
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = ctx->src[i].sel;
				alu.src[0].chan = k;
				alu.src[0].value = ctx->src[i].value[k];
				alu.dst.sel = treg;
				alu.dst.chan = k;
				alu.dst.write = 1;
				if (k == 3)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			ctx->src[i].sel = treg;
			j--;
		}
	}
	return 0;
}
1326
1327 static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
1328 {
1329 int i, r, count = ctx->shader->ninput;
1330
1331 for (i = 0; i < count; i++) {
1332 if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
1333 r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
1334 if (r)
1335 return r;
1336 }
1337 }
1338 return 0;
1339 }
1340
/* Emit the stream-output (transform feedback) stores for all outputs of
 * the current shader.  stream == -1 emits everything; otherwise only
 * outputs bound to the given buffer are emitted.  Returns 0 on success
 * or a negative error code.
 * NOTE(review): stream_item_size is unused in this body - presumably
 * kept for interface symmetry with callers; confirm before removing. */
static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
			  int stream, unsigned *stream_item_size)
{
	unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
	unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
	int i, j, r;

	/* Sanity checking. */
	if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
		R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
		r = -EINVAL;
		goto out_err;
	}
	for (i = 0; i < so->num_outputs; i++) {
		if (so->output[i].output_buffer >= 4) {
			R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
				 so->output[i].output_buffer);
			r = -EINVAL;
			goto out_err;
		}
	}

	/* Initialize locations where the outputs are stored. */
	for (i = 0; i < so->num_outputs; i++) {

		so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
		start_comp[i] = so->output[i].start_component;
		/* Lower outputs with dst_offset < start_component.
		 *
		 * We can only output 4D vectors with a write mask, e.g. we can
		 * only output the W component at offset 3, etc. If we want
		 * to store Y, Z, or W at buffer offset 0, we need to use MOV
		 * to move it to X and output X. */
		if (so->output[i].dst_offset < so->output[i].start_component) {
			unsigned tmp = r600_get_temp(ctx);

			/* shift the components down to start at channel 0 */
			for (j = 0; j < so->output[i].num_components; j++) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = so_gpr[i];
				alu.src[0].chan = so->output[i].start_component + j;

				alu.dst.sel = tmp;
				alu.dst.chan = j;
				alu.dst.write = 1;
				if (j == so->output[i].num_components - 1)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			start_comp[i] = 0;
			so_gpr[i] = tmp;
		}
	}

	/* Write outputs to buffers. */
	for (i = 0; i < so->num_outputs; i++) {
		struct r600_bytecode_output output;

		if (stream != -1 && stream != so->output[i].output_buffer)
			continue;

		memset(&output, 0, sizeof(struct r600_bytecode_output));
		output.gpr = so_gpr[i];
		output.elem_size = so->output[i].num_components - 1;
		if (output.elem_size == 2)
			output.elem_size = 3; // 3 not supported, write 4 with junk at end
		output.array_base = so->output[i].dst_offset - start_comp[i];
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
		output.burst_count = 1;
		/* array_size is an upper limit for the burst_count
		 * with MEM_STREAM instructions */
		output.array_size = 0xFFF;
		output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];

		if (ctx->bc->chip_class >= EVERGREEN) {
			/* EG has a CF op per (stream, buffer) pair; pick the
			 * buffer op for stream 0 then bias by stream * 4 */
			switch (so->output[i].output_buffer) {
			case 0:
				output.op = CF_OP_MEM_STREAM0_BUF0;
				break;
			case 1:
				output.op = CF_OP_MEM_STREAM0_BUF1;
				break;
			case 2:
				output.op = CF_OP_MEM_STREAM0_BUF2;
				break;
			case 3:
				output.op = CF_OP_MEM_STREAM0_BUF3;
				break;
			}
			output.op += so->output[i].stream * 4;
			assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
			ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
		} else {
			/* pre-EG only has per-buffer MEM_STREAM ops */
			switch (so->output[i].output_buffer) {
			case 0:
				output.op = CF_OP_MEM_STREAM0;
				break;
			case 1:
				output.op = CF_OP_MEM_STREAM1;
				break;
			case 2:
				output.op = CF_OP_MEM_STREAM2;
				break;
			case 3:
				output.op = CF_OP_MEM_STREAM3;
				break;
			}
			ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
		}
		r = r600_bytecode_add_output(ctx->bc, &output);
		if (r)
			goto out_err;
	}
	return 0;
out_err:
	return r;
}
1461
1462 static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
1463 {
1464 struct r600_bytecode_alu alu;
1465 unsigned reg;
1466
1467 if (!ctx->shader->vs_out_edgeflag)
1468 return;
1469
1470 reg = ctx->shader->output[ctx->edgeflag_output].gpr;
1471
1472 /* clamp(x, 0, 1) */
1473 memset(&alu, 0, sizeof(alu));
1474 alu.op = ALU_OP1_MOV;
1475 alu.src[0].sel = reg;
1476 alu.dst.sel = reg;
1477 alu.dst.write = 1;
1478 alu.dst.clamp = 1;
1479 alu.last = 1;
1480 r600_bytecode_add_alu(ctx->bc, &alu);
1481
1482 memset(&alu, 0, sizeof(alu));
1483 alu.op = ALU_OP1_FLT_TO_INT;
1484 alu.src[0].sel = reg;
1485 alu.dst.sel = reg;
1486 alu.dst.write = 1;
1487 alu.last = 1;
1488 r600_bytecode_add_alu(ctx->bc, &alu);
1489 }
1490
/* Build the "GS copy shader": a small VS-typed program that reads the
 * vertices the geometry shader wrote to the GSVS ring and re-exports
 * them as ordinary position/parameter exports, emitting stream-output
 * stores on the way.  The result is stored in gs->gs_copy_shader.
 * Returns the result of r600_bytecode_build (0 on success).
 * NOTE(review): on calloc failure this returns 0, which the caller will
 * read as success with gs_copy_shader left NULL - confirm whether that
 * is intentional. */
static int generate_gs_copy_shader(struct r600_context *rctx,
				   struct r600_pipe_shader *gs,
				   struct pipe_stream_output_info *so)
{
	struct r600_shader_ctx ctx = {};
	struct r600_shader *gs_shader = &gs->shader;
	struct r600_pipe_shader *cshader;
	int ocnt = gs_shader->noutput;
	struct r600_bytecode_alu alu;
	struct r600_bytecode_vtx vtx;
	struct r600_bytecode_output output;
	struct r600_bytecode_cf *cf_jump, *cf_pop,
		*last_exp_pos = NULL, *last_exp_param = NULL;
	int i, j, next_clip_pos = 61, next_param = 0;
	int ring;

	cshader = calloc(1, sizeof(struct r600_pipe_shader));
	if (!cshader)
		return 0;

	/* the copy shader exports exactly the GS's outputs */
	memcpy(cshader->shader.output, gs_shader->output, ocnt *
	       sizeof(struct r600_shader_io));

	cshader->shader.noutput = ocnt;

	ctx.shader = &cshader->shader;
	ctx.bc = &ctx.shader->bc;
	ctx.type = ctx.bc->type = TGSI_PROCESSOR_VERTEX;

	r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
			   rctx->screen->has_compressed_msaa_texturing);

	ctx.bc->isa = rctx->isa;

	cf_jump = NULL;
	memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));

	/* R0.x = R0.x & 0x3fffffff -- low bits: ring read offset */
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP2_AND_INT;
	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].value = 0x3fffffff;
	alu.dst.write = 1;
	r600_bytecode_add_alu(ctx.bc, &alu);

	/* R0.y = R0.x >> 30 -- top two bits: stream id */
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP2_LSHR_INT;
	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].value = 0x1e;
	alu.dst.chan = 1;
	alu.dst.write = 1;
	alu.last = 1;
	r600_bytecode_add_alu(ctx.bc, &alu);

	/* fetch vertex data from GSVS ring */
	for (i = 0; i < ocnt; ++i) {
		struct r600_shader_io *out = &ctx.shader->output[i];

		out->gpr = i + 1;
		out->ring_offset = i * 16;

		memset(&vtx, 0, sizeof(vtx));
		vtx.op = FETCH_OP_VFETCH;
		vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
		vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
		vtx.offset = out->ring_offset;
		vtx.dst_gpr = out->gpr;
		vtx.src_gpr = 0;
		vtx.dst_sel_x = 0;
		vtx.dst_sel_y = 1;
		vtx.dst_sel_z = 2;
		vtx.dst_sel_w = 3;
		if (rctx->b.chip_class >= EVERGREEN) {
			vtx.use_const_fields = 1;
		} else {
			vtx.data_format = FMT_32_32_32_32_FLOAT;
		}

		r600_bytecode_add_vtx(ctx.bc, &vtx);
	}
	ctx.temp_reg = i + 1;
	/* one predicated section per stream: streamout for the stream whose
	 * id is in R0.y; only stream 0 falls through to the exports */
	for (ring = 3; ring >= 0; --ring) {
		bool enabled = false;
		for (i = 0; i < so->num_outputs; i++) {
			if (so->output[i].stream == ring) {
				enabled = true;
				break;
			}
		}
		if (ring != 0 && !enabled) {
			cshader->shader.ring_item_sizes[ring] = 0;
			continue;
		}

		if (cf_jump) {
			// Patch up jump label
			r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
			cf_pop = ctx.bc->cf_last;

			cf_jump->cf_addr = cf_pop->id + 2;
			cf_jump->pop_count = 1;
			cf_pop->cf_addr = cf_pop->id + 2;
			cf_pop->pop_count = 1;
		}

		/* PRED_SETE_INT __, R0.y, ring */
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP2_PRED_SETE_INT;
		alu.src[0].chan = 1;
		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = ring;
		alu.execute_mask = 1;
		alu.update_pred = 1;
		alu.last = 1;
		r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);

		r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
		cf_jump = ctx.bc->cf_last;

		if (enabled)
			emit_streamout(&ctx, so, ring, &cshader->shader.ring_item_sizes[ring]);
		cshader->shader.ring_item_sizes[ring] = ocnt * 16;
	}

	/* export vertex data */
	/* XXX factor out common code with r600_shader_from_tgsi ? */
	for (i = 0; i < ocnt; ++i) {
		struct r600_shader_io *out = &ctx.shader->output[i];
		bool instream0 = true;
		if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
			continue;

		/* skip outputs that belong exclusively to streams > 0 */
		for (j = 0; j < so->num_outputs; j++) {
			if (so->output[j].register_index == i) {
				if (so->output[j].stream == 0)
					break;
				if (so->output[j].stream > 0)
					instream0 = false;
			}
		}
		if (!instream0)
			continue;
		memset(&output, 0, sizeof(output));
		output.gpr = out->gpr;
		output.elem_size = 3;
		output.swizzle_x = 0;
		output.swizzle_y = 1;
		output.swizzle_z = 2;
		output.swizzle_w = 3;
		output.burst_count = 1;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		output.op = CF_OP_EXPORT;
		switch (out->name) {
		case TGSI_SEMANTIC_POSITION:
			output.array_base = 60;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			break;

		case TGSI_SEMANTIC_PSIZE:
			/* point size goes into the misc position export (61);
			 * swizzle 7 masks the unused channels */
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			output.swizzle_y = 7;
			output.swizzle_z = 7;
			output.swizzle_w = 7;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_point_size = 1;
			break;
		case TGSI_SEMANTIC_LAYER:
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			output.swizzle_x = 7;
			output.swizzle_y = 7;
			output.swizzle_z = 0;
			output.swizzle_w = 7;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_layer = 1;
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_viewport = 1;
			output.swizzle_x = 7;
			output.swizzle_y = 7;
			output.swizzle_z = 7;
			output.swizzle_w = 0;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* spi_sid is 0 for clipdistance outputs that were generated
			 * for clipvertex - we don't need to pass them to PS */
			ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = next_clip_pos++;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			break;
		case TGSI_SEMANTIC_FOG:
			output.swizzle_y = 4; /* 0 */
			output.swizzle_z = 4; /* 0 */
			output.swizzle_w = 5; /* 1 */
			break;
		default:
			output.array_base = next_param++;
			break;
		}
		r600_bytecode_add_output(ctx.bc, &output);
		if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
			last_exp_param = ctx.bc->cf_last;
		else
			last_exp_pos = ctx.bc->cf_last;
	}

	/* the HW requires at least one position export: emit a dummy one */
	if (!last_exp_pos) {
		memset(&output, 0, sizeof(output));
		output.gpr = 0;
		output.elem_size = 3;
		output.swizzle_x = 7;
		output.swizzle_y = 7;
		output.swizzle_z = 7;
		output.swizzle_w = 7;
		output.burst_count = 1;
		output.type = 2;
		output.op = CF_OP_EXPORT;
		output.array_base = 60;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
		r600_bytecode_add_output(ctx.bc, &output);
		last_exp_pos = ctx.bc->cf_last;
	}

	/* likewise for a parameter export */
	if (!last_exp_param) {
		memset(&output, 0, sizeof(output));
		output.gpr = 0;
		output.elem_size = 3;
		output.swizzle_x = 7;
		output.swizzle_y = 7;
		output.swizzle_z = 7;
		output.swizzle_w = 7;
		output.burst_count = 1;
		output.type = 2;
		output.op = CF_OP_EXPORT;
		output.array_base = next_param++;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		r600_bytecode_add_output(ctx.bc, &output);
		last_exp_param = ctx.bc->cf_last;
	}

	/* mark the final exports of each kind */
	last_exp_pos->op = CF_OP_EXPORT_DONE;
	last_exp_param->op = CF_OP_EXPORT_DONE;

	/* close the last predicated section */
	r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
	cf_pop = ctx.bc->cf_last;

	cf_jump->cf_addr = cf_pop->id + 2;
	cf_jump->pop_count = 1;
	cf_pop->cf_addr = cf_pop->id + 2;
	cf_pop->pop_count = 1;

	if (ctx.bc->chip_class == CAYMAN)
		cm_bytecode_add_cf_end(ctx.bc);
	else {
		r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
		ctx.bc->cf_last->end_of_program = 1;
	}

	gs->gs_copy_shader = cshader;
	cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;

	ctx.bc->nstack = 1;

	return r600_bytecode_build(ctx.bc);
}
1785
/* Emit MEM_RING writes that store the shader's outputs to the ESGS or
 * GSVS ring for the given stream (stream == -1 behaves like stream 0).
 * ind selects indirect addressing via the per-stream export GPR, which
 * is then advanced by one vertex.  Returns 0 on success or the error
 * code from the bytecode emitters. */
static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind)
{
	struct r600_bytecode_output output;
	int i, k, ring_offset;
	int effective_stream = stream == -1 ? 0 : stream;
	int idx = 0;

	for (i = 0; i < ctx->shader->noutput; i++) {
		if (ctx->gs_for_vs) {
			/* for ES we need to lookup corresponding ring offset expected by GS
			 * (map this output to GS input by name and sid) */
			/* FIXME precompute offsets */
			ring_offset = -1;
			for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
				struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
				struct r600_shader_io *out = &ctx->shader->output[i];
				if (in->name == out->name && in->sid == out->sid)
					ring_offset = in->ring_offset;
			}

			/* output not consumed by the GS: skip it */
			if (ring_offset == -1)
				continue;
		} else {
			/* GS itself: outputs are packed densely, 16 bytes each */
			ring_offset = idx * 16;
			idx++;
		}

		if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
			continue;
		/* next_ring_offset after parsing input decls contains total size of
		 * single vertex data, gs_next_vertex - current vertex index */
		if (!ind)
			ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;

		memset(&output, 0, sizeof(struct r600_bytecode_output));
		output.gpr = ctx->shader->output[i].gpr;
		output.elem_size = 3;
		output.comp_mask = 0xF;
		output.burst_count = 1;

		if (ind)
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
		else
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;

		/* one MEM_RING CF op per stream */
		switch (stream) {
		default:
		case 0:
			output.op = CF_OP_MEM_RING; break;
		case 1:
			output.op = CF_OP_MEM_RING1; break;
		case 2:
			output.op = CF_OP_MEM_RING2; break;
		case 3:
			output.op = CF_OP_MEM_RING3; break;
		}

		if (ind) {
			output.array_base = ring_offset >> 2; /* in dwords */
			output.array_size = 0xfff;
			output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
		} else
			output.array_base = ring_offset >> 2; /* in dwords */
		r600_bytecode_add_output(ctx->bc, &output);
	}

	if (ind) {
		/* get a temp and add the ring offset to the next vertex base in the shader */
		struct r600_bytecode_alu alu;
		int r;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP2_ADD_INT;
		alu.src[0].sel = ctx->gs_export_gpr_tregs[effective_stream];
		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = ctx->gs_out_ring_offset >> 4;
		alu.dst.sel = ctx->gs_export_gpr_tregs[effective_stream];
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	++ctx->gs_next_vertex;
	return 0;
}
1872
1873 static int r600_shader_from_tgsi(struct r600_context *rctx,
1874 struct r600_pipe_shader *pipeshader,
1875 union r600_shader_key key)
1876 {
1877 struct r600_screen *rscreen = rctx->screen;
1878 struct r600_shader *shader = &pipeshader->shader;
1879 struct tgsi_token *tokens = pipeshader->selector->tokens;
1880 struct pipe_stream_output_info so = pipeshader->selector->so;
1881 struct tgsi_full_immediate *immediate;
1882 struct r600_shader_ctx ctx;
1883 struct r600_bytecode_output output[32];
1884 unsigned output_done, noutput;
1885 unsigned opcode;
1886 int i, j, k, r = 0;
1887 int next_param_base = 0, next_clip_base;
1888 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
1889 /* Declarations used by llvm code */
1890 bool use_llvm = false;
1891 bool indirect_gprs;
1892 bool ring_outputs = false;
1893 bool pos_emitted = false;
1894
1895 #ifdef R600_USE_LLVM
1896 use_llvm = rscreen->b.debug_flags & DBG_LLVM;
1897 #endif
1898 ctx.bc = &shader->bc;
1899 ctx.shader = shader;
1900 ctx.native_integers = true;
1901
1902
1903 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
1904 rscreen->has_compressed_msaa_texturing);
1905 ctx.tokens = tokens;
1906 tgsi_scan_shader(tokens, &ctx.info);
1907 shader->indirect_files = ctx.info.indirect_files;
1908
1909 shader->uses_doubles = ctx.info.uses_doubles;
1910
1911 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
1912 tgsi_parse_init(&ctx.parse, tokens);
1913 ctx.type = ctx.info.processor;
1914 shader->processor_type = ctx.type;
1915 ctx.bc->type = shader->processor_type;
1916
1917 switch (ctx.type) {
1918 case TGSI_PROCESSOR_VERTEX:
1919 shader->vs_as_gs_a = key.vs.as_gs_a;
1920 shader->vs_as_es = key.vs.as_es;
1921 if (shader->vs_as_es)
1922 ring_outputs = true;
1923 break;
1924 case TGSI_PROCESSOR_GEOMETRY:
1925 ring_outputs = true;
1926 break;
1927 case TGSI_PROCESSOR_FRAGMENT:
1928 shader->two_side = key.ps.color_two_side;
1929 break;
1930 default:
1931 break;
1932 }
1933
1934 if (shader->vs_as_es) {
1935 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
1936 } else {
1937 ctx.gs_for_vs = NULL;
1938 }
1939
1940 ctx.next_ring_offset = 0;
1941 ctx.gs_out_ring_offset = 0;
1942 ctx.gs_next_vertex = 0;
1943 ctx.gs_stream_output_info = &so;
1944
1945 ctx.face_gpr = -1;
1946 ctx.fixed_pt_position_gpr = -1;
1947 ctx.fragcoord_input = -1;
1948 ctx.colors_used = 0;
1949 ctx.clip_vertex_write = 0;
1950
1951 shader->nr_ps_color_exports = 0;
1952 shader->nr_ps_max_color_exports = 0;
1953
1954
1955 /* register allocations */
1956 /* Values [0,127] correspond to GPR[0..127].
1957 * Values [128,159] correspond to constant buffer bank 0
1958 * Values [160,191] correspond to constant buffer bank 1
1959 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
1960 * Values [256,287] correspond to constant buffer bank 2 (EG)
1961 * Values [288,319] correspond to constant buffer bank 3 (EG)
1962 * Other special values are shown in the list below.
1963 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
1964 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
1965 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
1966 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
1967 * 248 SQ_ALU_SRC_0: special constant 0.0.
1968 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
1969 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
1970 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
1971 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
1972 * 253 SQ_ALU_SRC_LITERAL: literal constant.
1973 * 254 SQ_ALU_SRC_PV: previous vector result.
1974 * 255 SQ_ALU_SRC_PS: previous scalar result.
1975 */
1976 for (i = 0; i < TGSI_FILE_COUNT; i++) {
1977 ctx.file_offset[i] = 0;
1978 }
1979
1980 #ifdef R600_USE_LLVM
1981 if (use_llvm && ctx.info.indirect_files && (ctx.info.indirect_files & (1 << TGSI_FILE_CONSTANT)) != ctx.info.indirect_files) {
1982 fprintf(stderr, "Warning: R600 LLVM backend does not support "
1983 "indirect adressing. Falling back to TGSI "
1984 "backend.\n");
1985 use_llvm = 0;
1986 }
1987 #endif
1988 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
1989 ctx.file_offset[TGSI_FILE_INPUT] = 1;
1990 if (!use_llvm) {
1991 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
1992 }
1993 }
1994 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
1995 if (ctx.bc->chip_class >= EVERGREEN)
1996 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
1997 else
1998 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
1999 }
2000 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2001 /* FIXME 1 would be enough in some cases (3 or less input vertices) */
2002 ctx.file_offset[TGSI_FILE_INPUT] = 2;
2003 }
2004 ctx.use_llvm = use_llvm;
2005
2006 if (use_llvm) {
2007 ctx.file_offset[TGSI_FILE_OUTPUT] =
2008 ctx.file_offset[TGSI_FILE_INPUT];
2009 } else {
2010 ctx.file_offset[TGSI_FILE_OUTPUT] =
2011 ctx.file_offset[TGSI_FILE_INPUT] +
2012 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2013 }
2014 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
2015 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
2016
2017 /* Outside the GPR range. This will be translated to one of the
2018 * kcache banks later. */
2019 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
2020
2021 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
2022 ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
2023 ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
2024 ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
2025 ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
2026
2027 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2028 ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
2029 ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
2030 ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
2031 ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
2032 ctx.temp_reg = ctx.bc->ar_reg + 7;
2033 } else {
2034 ctx.temp_reg = ctx.bc->ar_reg + 3;
2035 }
2036
2037 shader->max_arrays = 0;
2038 shader->num_arrays = 0;
2039 if (indirect_gprs) {
2040
2041 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
2042 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
2043 ctx.file_offset[TGSI_FILE_OUTPUT] -
2044 ctx.file_offset[TGSI_FILE_INPUT],
2045 0x0F);
2046 }
2047 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2048 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
2049 ctx.file_offset[TGSI_FILE_TEMPORARY] -
2050 ctx.file_offset[TGSI_FILE_OUTPUT],
2051 0x0F);
2052 }
2053 }
2054
2055 ctx.nliterals = 0;
2056 ctx.literals = NULL;
2057
2058 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
2059 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
2060 shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
2061
2062 if (shader->vs_as_gs_a)
2063 vs_add_primid_output(&ctx, key.vs.prim_id_out);
2064
2065 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2066 tgsi_parse_token(&ctx.parse);
2067 switch (ctx.parse.FullToken.Token.Type) {
2068 case TGSI_TOKEN_TYPE_IMMEDIATE:
2069 immediate = &ctx.parse.FullToken.FullImmediate;
2070 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
2071 if(ctx.literals == NULL) {
2072 r = -ENOMEM;
2073 goto out_err;
2074 }
2075 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
2076 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
2077 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
2078 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
2079 ctx.nliterals++;
2080 break;
2081 case TGSI_TOKEN_TYPE_DECLARATION:
2082 r = tgsi_declaration(&ctx);
2083 if (r)
2084 goto out_err;
2085 break;
2086 case TGSI_TOKEN_TYPE_INSTRUCTION:
2087 case TGSI_TOKEN_TYPE_PROPERTY:
2088 break;
2089 default:
2090 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
2091 r = -EINVAL;
2092 goto out_err;
2093 }
2094 }
2095
2096 shader->ring_item_sizes[0] = ctx.next_ring_offset;
2097 shader->ring_item_sizes[1] = 0;
2098 shader->ring_item_sizes[2] = 0;
2099 shader->ring_item_sizes[3] = 0;
2100
2101 /* Process two side if needed */
2102 if (shader->two_side && ctx.colors_used) {
2103 int i, count = ctx.shader->ninput;
2104 unsigned next_lds_loc = ctx.shader->nlds;
2105
2106 /* additional inputs will be allocated right after the existing inputs,
2107 * we won't need them after the color selection, so we don't need to
2108 * reserve these gprs for the rest of the shader code and to adjust
2109 * output offsets etc. */
2110 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
2111 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2112
2113 /* if two sided and neither face or sample mask is used by shader, ensure face_gpr is emitted */
2114 if (ctx.face_gpr == -1) {
2115 i = ctx.shader->ninput++;
2116 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
2117 ctx.shader->input[i].spi_sid = 0;
2118 ctx.shader->input[i].gpr = gpr++;
2119 ctx.face_gpr = ctx.shader->input[i].gpr;
2120 }
2121
2122 for (i = 0; i < count; i++) {
2123 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2124 int ni = ctx.shader->ninput++;
2125 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
2126 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
2127 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
2128 ctx.shader->input[ni].gpr = gpr++;
2129 // TGSI to LLVM needs to know the lds position of inputs.
2130 // Non LLVM path computes it later (in process_twoside_color)
2131 ctx.shader->input[ni].lds_pos = next_lds_loc++;
2132 ctx.shader->input[i].back_color_input = ni;
2133 if (ctx.bc->chip_class >= EVERGREEN) {
2134 if ((r = evergreen_interp_input(&ctx, ni)))
2135 return r;
2136 }
2137 }
2138 }
2139 }
2140
2141 /* LLVM backend setup */
2142 #ifdef R600_USE_LLVM
2143 if (use_llvm) {
2144 struct radeon_llvm_context radeon_llvm_ctx;
2145 LLVMModuleRef mod;
2146 bool dump = r600_can_dump_shader(&rscreen->b, tokens);
2147 boolean use_kill = false;
2148
2149 memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
2150 radeon_llvm_ctx.type = ctx.type;
2151 radeon_llvm_ctx.two_side = shader->two_side;
2152 radeon_llvm_ctx.face_gpr = ctx.face_gpr;
2153 radeon_llvm_ctx.inputs_count = ctx.shader->ninput + 1;
2154 radeon_llvm_ctx.r600_inputs = ctx.shader->input;
2155 radeon_llvm_ctx.r600_outputs = ctx.shader->output;
2156 radeon_llvm_ctx.color_buffer_count = max_color_exports;
2157 radeon_llvm_ctx.chip_class = ctx.bc->chip_class;
2158 radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN);
2159 radeon_llvm_ctx.stream_outputs = &so;
2160 radeon_llvm_ctx.alpha_to_one = key.ps.alpha_to_one;
2161 radeon_llvm_ctx.has_compressed_msaa_texturing =
2162 ctx.bc->has_compressed_msaa_texturing;
2163 mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
2164 ctx.shader->has_txq_cube_array_z_comp = radeon_llvm_ctx.has_txq_cube_array_z_comp;
2165 ctx.shader->uses_tex_buffers = radeon_llvm_ctx.uses_tex_buffers;
2166
2167 if (r600_llvm_compile(mod, rscreen->b.family, ctx.bc, &use_kill, dump)) {
2168 radeon_llvm_dispose(&radeon_llvm_ctx);
2169 use_llvm = 0;
2170 fprintf(stderr, "R600 LLVM backend failed to compile "
2171 "shader. Falling back to TGSI\n");
2172 } else {
2173 ctx.file_offset[TGSI_FILE_OUTPUT] =
2174 ctx.file_offset[TGSI_FILE_INPUT];
2175 }
2176 if (use_kill)
2177 ctx.shader->uses_kill = use_kill;
2178 radeon_llvm_dispose(&radeon_llvm_ctx);
2179 }
2180 #endif
2181 /* End of LLVM backend setup */
2182
2183 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
2184 shader->nr_ps_max_color_exports = 8;
2185
2186 if (!use_llvm) {
2187 if (ctx.fragcoord_input >= 0) {
2188 if (ctx.bc->chip_class == CAYMAN) {
2189 for (j = 0 ; j < 4; j++) {
2190 struct r600_bytecode_alu alu;
2191 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2192 alu.op = ALU_OP1_RECIP_IEEE;
2193 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2194 alu.src[0].chan = 3;
2195
2196 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2197 alu.dst.chan = j;
2198 alu.dst.write = (j == 3);
2199 alu.last = 1;
2200 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2201 return r;
2202 }
2203 } else {
2204 struct r600_bytecode_alu alu;
2205 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2206 alu.op = ALU_OP1_RECIP_IEEE;
2207 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2208 alu.src[0].chan = 3;
2209
2210 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2211 alu.dst.chan = 3;
2212 alu.dst.write = 1;
2213 alu.last = 1;
2214 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2215 return r;
2216 }
2217 }
2218
2219 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2220 struct r600_bytecode_alu alu;
2221 int r;
2222 for (j = 0; j < 4; j++) {
2223 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2224 alu.op = ALU_OP1_MOV;
2225 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
2226 alu.src[0].value = 0;
2227 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
2228 alu.dst.write = 1;
2229 alu.last = 1;
2230 r = r600_bytecode_add_alu(ctx.bc, &alu);
2231 if (r)
2232 return r;
2233 }
2234 }
2235 if (shader->two_side && ctx.colors_used) {
2236 if ((r = process_twoside_color_inputs(&ctx)))
2237 return r;
2238 }
2239
2240 tgsi_parse_init(&ctx.parse, tokens);
2241 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2242 tgsi_parse_token(&ctx.parse);
2243 switch (ctx.parse.FullToken.Token.Type) {
2244 case TGSI_TOKEN_TYPE_INSTRUCTION:
2245 r = tgsi_is_supported(&ctx);
2246 if (r)
2247 goto out_err;
2248 ctx.max_driver_temp_used = 0;
2249 /* reserve first tmp for everyone */
2250 r600_get_temp(&ctx);
2251
2252 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
2253 if ((r = tgsi_split_constant(&ctx)))
2254 goto out_err;
2255 if ((r = tgsi_split_literal_constant(&ctx)))
2256 goto out_err;
2257 if (ctx.type == TGSI_PROCESSOR_GEOMETRY)
2258 if ((r = tgsi_split_gs_inputs(&ctx)))
2259 goto out_err;
2260 if (ctx.bc->chip_class == CAYMAN)
2261 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
2262 else if (ctx.bc->chip_class >= EVERGREEN)
2263 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
2264 else
2265 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
2266 r = ctx.inst_info->process(&ctx);
2267 if (r)
2268 goto out_err;
2269 break;
2270 default:
2271 break;
2272 }
2273 }
2274 }
2275
2276 /* Reset the temporary register counter. */
2277 ctx.max_driver_temp_used = 0;
2278
2279 noutput = shader->noutput;
2280
2281 if (!ring_outputs && ctx.clip_vertex_write) {
2282 unsigned clipdist_temp[2];
2283
2284 clipdist_temp[0] = r600_get_temp(&ctx);
2285 clipdist_temp[1] = r600_get_temp(&ctx);
2286
2287 /* need to convert a clipvertex write into clipdistance writes and not export
2288 the clip vertex anymore */
2289
2290 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
2291 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2292 shader->output[noutput].gpr = clipdist_temp[0];
2293 noutput++;
2294 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2295 shader->output[noutput].gpr = clipdist_temp[1];
2296 noutput++;
2297
2298 /* reset spi_sid for clipvertex output to avoid confusing spi */
2299 shader->output[ctx.cv_output].spi_sid = 0;
2300
2301 shader->clip_dist_write = 0xFF;
2302
2303 for (i = 0; i < 8; i++) {
2304 int oreg = i >> 2;
2305 int ochan = i & 3;
2306
2307 for (j = 0; j < 4; j++) {
2308 struct r600_bytecode_alu alu;
2309 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2310 alu.op = ALU_OP2_DOT4;
2311 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
2312 alu.src[0].chan = j;
2313
2314 alu.src[1].sel = 512 + i;
2315 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
2316 alu.src[1].chan = j;
2317
2318 alu.dst.sel = clipdist_temp[oreg];
2319 alu.dst.chan = j;
2320 alu.dst.write = (j == ochan);
2321 if (j == 3)
2322 alu.last = 1;
2323 if (!use_llvm)
2324 r = r600_bytecode_add_alu(ctx.bc, &alu);
2325 if (r)
2326 return r;
2327 }
2328 }
2329 }
2330
2331 /* Add stream outputs. */
2332 if (!ring_outputs && ctx.type == TGSI_PROCESSOR_VERTEX &&
2333 so.num_outputs && !use_llvm)
2334 emit_streamout(&ctx, &so, -1, NULL);
2335
2336 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2337 convert_edgeflag_to_int(&ctx);
2338
2339 if (ring_outputs) {
2340 if (shader->vs_as_es) {
2341 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
2342 ctx.gs_export_gpr_tregs[1] = -1;
2343 ctx.gs_export_gpr_tregs[2] = -1;
2344 ctx.gs_export_gpr_tregs[3] = -1;
2345
2346 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
2347 }
2348 } else {
2349 /* Export output */
2350 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
2351
2352 for (i = 0, j = 0; i < noutput; i++, j++) {
2353 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2354 output[j].gpr = shader->output[i].gpr;
2355 output[j].elem_size = 3;
2356 output[j].swizzle_x = 0;
2357 output[j].swizzle_y = 1;
2358 output[j].swizzle_z = 2;
2359 output[j].swizzle_w = 3;
2360 output[j].burst_count = 1;
2361 output[j].type = -1;
2362 output[j].op = CF_OP_EXPORT;
2363 switch (ctx.type) {
2364 case TGSI_PROCESSOR_VERTEX:
2365 switch (shader->output[i].name) {
2366 case TGSI_SEMANTIC_POSITION:
2367 output[j].array_base = 60;
2368 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2369 pos_emitted = true;
2370 break;
2371
2372 case TGSI_SEMANTIC_PSIZE:
2373 output[j].array_base = 61;
2374 output[j].swizzle_y = 7;
2375 output[j].swizzle_z = 7;
2376 output[j].swizzle_w = 7;
2377 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2378 pos_emitted = true;
2379 break;
2380 case TGSI_SEMANTIC_EDGEFLAG:
2381 output[j].array_base = 61;
2382 output[j].swizzle_x = 7;
2383 output[j].swizzle_y = 0;
2384 output[j].swizzle_z = 7;
2385 output[j].swizzle_w = 7;
2386 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2387 pos_emitted = true;
2388 break;
2389 case TGSI_SEMANTIC_LAYER:
2390 /* spi_sid is 0 for outputs that are
2391 * not consumed by PS */
2392 if (shader->output[i].spi_sid) {
2393 output[j].array_base = next_param_base++;
2394 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2395 j++;
2396 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2397 }
2398 output[j].array_base = 61;
2399 output[j].swizzle_x = 7;
2400 output[j].swizzle_y = 7;
2401 output[j].swizzle_z = 0;
2402 output[j].swizzle_w = 7;
2403 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2404 pos_emitted = true;
2405 break;
2406 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2407 /* spi_sid is 0 for outputs that are
2408 * not consumed by PS */
2409 if (shader->output[i].spi_sid) {
2410 output[j].array_base = next_param_base++;
2411 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2412 j++;
2413 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2414 }
2415 output[j].array_base = 61;
2416 output[j].swizzle_x = 7;
2417 output[j].swizzle_y = 7;
2418 output[j].swizzle_z = 7;
2419 output[j].swizzle_w = 0;
2420 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2421 pos_emitted = true;
2422 break;
2423 case TGSI_SEMANTIC_CLIPVERTEX:
2424 j--;
2425 break;
2426 case TGSI_SEMANTIC_CLIPDIST:
2427 output[j].array_base = next_clip_base++;
2428 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2429 pos_emitted = true;
2430 /* spi_sid is 0 for clipdistance outputs that were generated
2431 * for clipvertex - we don't need to pass them to PS */
2432 if (shader->output[i].spi_sid) {
2433 j++;
2434 /* duplicate it as PARAM to pass to the pixel shader */
2435 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2436 output[j].array_base = next_param_base++;
2437 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2438 }
2439 break;
2440 case TGSI_SEMANTIC_FOG:
2441 output[j].swizzle_y = 4; /* 0 */
2442 output[j].swizzle_z = 4; /* 0 */
2443 output[j].swizzle_w = 5; /* 1 */
2444 break;
2445 case TGSI_SEMANTIC_PRIMID:
2446 output[j].swizzle_x = 2;
2447 output[j].swizzle_y = 4; /* 0 */
2448 output[j].swizzle_z = 4; /* 0 */
2449 output[j].swizzle_w = 4; /* 0 */
2450 break;
2451 }
2452
2453 break;
2454 case TGSI_PROCESSOR_FRAGMENT:
2455 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
2456 /* never export more colors than the number of CBs */
2457 if (shader->output[i].sid >= max_color_exports) {
2458 /* skip export */
2459 j--;
2460 continue;
2461 }
2462 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2463 output[j].array_base = shader->output[i].sid;
2464 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2465 shader->nr_ps_color_exports++;
2466 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
2467 for (k = 1; k < max_color_exports; k++) {
2468 j++;
2469 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2470 output[j].gpr = shader->output[i].gpr;
2471 output[j].elem_size = 3;
2472 output[j].swizzle_x = 0;
2473 output[j].swizzle_y = 1;
2474 output[j].swizzle_z = 2;
2475 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2476 output[j].burst_count = 1;
2477 output[j].array_base = k;
2478 output[j].op = CF_OP_EXPORT;
2479 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2480 shader->nr_ps_color_exports++;
2481 }
2482 }
2483 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
2484 output[j].array_base = 61;
2485 output[j].swizzle_x = 2;
2486 output[j].swizzle_y = 7;
2487 output[j].swizzle_z = output[j].swizzle_w = 7;
2488 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2489 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
2490 output[j].array_base = 61;
2491 output[j].swizzle_x = 7;
2492 output[j].swizzle_y = 1;
2493 output[j].swizzle_z = output[j].swizzle_w = 7;
2494 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2495 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
2496 output[j].array_base = 61;
2497 output[j].swizzle_x = 7;
2498 output[j].swizzle_y = 7;
2499 output[j].swizzle_z = 0;
2500 output[j].swizzle_w = 7;
2501 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2502 } else {
2503 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
2504 r = -EINVAL;
2505 goto out_err;
2506 }
2507 break;
2508 default:
2509 R600_ERR("unsupported processor type %d\n", ctx.type);
2510 r = -EINVAL;
2511 goto out_err;
2512 }
2513
2514 if (output[j].type==-1) {
2515 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2516 output[j].array_base = next_param_base++;
2517 }
2518 }
2519
2520 /* add fake position export */
2521 if (ctx.type == TGSI_PROCESSOR_VERTEX && pos_emitted == false) {
2522 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2523 output[j].gpr = 0;
2524 output[j].elem_size = 3;
2525 output[j].swizzle_x = 7;
2526 output[j].swizzle_y = 7;
2527 output[j].swizzle_z = 7;
2528 output[j].swizzle_w = 7;
2529 output[j].burst_count = 1;
2530 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2531 output[j].array_base = 60;
2532 output[j].op = CF_OP_EXPORT;
2533 j++;
2534 }
2535
2536 /* add fake param output for vertex shader if no param is exported */
2537 if (ctx.type == TGSI_PROCESSOR_VERTEX && next_param_base == 0) {
2538 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2539 output[j].gpr = 0;
2540 output[j].elem_size = 3;
2541 output[j].swizzle_x = 7;
2542 output[j].swizzle_y = 7;
2543 output[j].swizzle_z = 7;
2544 output[j].swizzle_w = 7;
2545 output[j].burst_count = 1;
2546 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2547 output[j].array_base = 0;
2548 output[j].op = CF_OP_EXPORT;
2549 j++;
2550 }
2551
2552 /* add fake pixel export */
2553 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && shader->nr_ps_color_exports == 0) {
2554 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2555 output[j].gpr = 0;
2556 output[j].elem_size = 3;
2557 output[j].swizzle_x = 7;
2558 output[j].swizzle_y = 7;
2559 output[j].swizzle_z = 7;
2560 output[j].swizzle_w = 7;
2561 output[j].burst_count = 1;
2562 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2563 output[j].array_base = 0;
2564 output[j].op = CF_OP_EXPORT;
2565 j++;
2566 shader->nr_ps_color_exports++;
2567 }
2568
2569 noutput = j;
2570
2571 /* set export done on last export of each type */
2572 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
2573 if (!(output_done & (1 << output[i].type))) {
2574 output_done |= (1 << output[i].type);
2575 output[i].op = CF_OP_EXPORT_DONE;
2576 }
2577 }
2578 /* add output to bytecode */
2579 if (!use_llvm) {
2580 for (i = 0; i < noutput; i++) {
2581 r = r600_bytecode_add_output(ctx.bc, &output[i]);
2582 if (r)
2583 goto out_err;
2584 }
2585 }
2586 }
2587
2588 /* add program end */
2589 if (!use_llvm) {
2590 if (ctx.bc->chip_class == CAYMAN)
2591 cm_bytecode_add_cf_end(ctx.bc);
2592 else {
2593 const struct cf_op_info *last = NULL;
2594
2595 if (ctx.bc->cf_last)
2596 last = r600_isa_cf(ctx.bc->cf_last->op);
2597
2598 /* alu clause instructions don't have EOP bit, so add NOP */
2599 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_CALL_FS)
2600 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2601
2602 ctx.bc->cf_last->end_of_program = 1;
2603 }
2604 }
2605
2606 /* check GPR limit - we have 124 = 128 - 4
2607 * (4 are reserved as alu clause temporary registers) */
2608 if (ctx.bc->ngpr > 124) {
2609 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
2610 r = -ENOMEM;
2611 goto out_err;
2612 }
2613
2614 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2615 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
2616 return r;
2617 }
2618
2619 free(ctx.literals);
2620 tgsi_parse_free(&ctx.parse);
2621 return 0;
2622 out_err:
2623 free(ctx.literals);
2624 tgsi_parse_free(&ctx.parse);
2625 return r;
2626 }
2627
2628 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
2629 {
2630 const unsigned tgsi_opcode =
2631 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
2632 R600_ERR("%s tgsi opcode unsupported\n",
2633 tgsi_get_opcode_name(tgsi_opcode));
2634 return -EINVAL;
2635 }
2636
/* Handler for TGSI_OPCODE_END: nothing to emit, just report success. */
static int tgsi_end(struct r600_shader_ctx *ctx)
{
	return 0;
}
2641
2642 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
2643 const struct r600_shader_src *shader_src,
2644 unsigned chan)
2645 {
2646 bc_src->sel = shader_src->sel;
2647 bc_src->chan = shader_src->swizzle[chan];
2648 bc_src->neg = shader_src->neg;
2649 bc_src->abs = shader_src->abs;
2650 bc_src->rel = shader_src->rel;
2651 bc_src->value = shader_src->value[bc_src->chan];
2652 bc_src->kc_bank = shader_src->kc_bank;
2653 bc_src->kc_rel = shader_src->kc_rel;
2654 }
2655
2656 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
2657 {
2658 bc_src->abs = 1;
2659 bc_src->neg = 0;
2660 }
2661
2662 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
2663 {
2664 bc_src->neg = !bc_src->neg;
2665 }
2666
2667 static void tgsi_dst(struct r600_shader_ctx *ctx,
2668 const struct tgsi_full_dst_register *tgsi_dst,
2669 unsigned swizzle,
2670 struct r600_bytecode_alu_dst *r600_dst)
2671 {
2672 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2673
2674 r600_dst->sel = tgsi_dst->Register.Index;
2675 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
2676 r600_dst->chan = swizzle;
2677 r600_dst->write = 1;
2678 if (tgsi_dst->Register.Indirect)
2679 r600_dst->rel = V_SQ_REL_RELATIVE;
2680 if (inst->Instruction.Saturate) {
2681 r600_dst->clamp = 1;
2682 }
2683 }
2684
/* Return the index of the highest channel set in a 4-bit writemask,
 * or 0 when the mask is empty. */
static int tgsi_last_instruction(unsigned writemask)
{
	int i;

	for (i = 3; i >= 0; i--) {
		if (writemask & (1 << i))
			return i;
	}
	return 0;
}
2696
2697
2698
/* Emit a two-source 64-bit (double) ALU operation.
 *
 * Doubles occupy channel pairs (xy and/or zw); fp64_switch() reorders
 * the source channel within each pair as the hardware expects.
 *
 * singledest: the TGSI destination is a single 32-bit channel (the
 *             writemask is widened to the containing pair internally).
 * swap:       emit with src0/src1 exchanged.
 */
static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	unsigned write_mask = inst->Dst[0].Register.WriteMask;
	struct r600_bytecode_alu alu;
	int i, j, r, lasti = tgsi_last_instruction(write_mask);
	/* non-zero: stage results in temp_reg, then copy temp channel
	 * (use_tmp - 1) to the real destination afterwards */
	int use_tmp = 0;

	if (singledest) {
		/* widen a single-channel mask to its full channel pair; when
		 * the requested channel is the high half of the pair (y or w)
		 * the result must go through a temp so the right 32-bit half
		 * lands in the destination channel */
		switch (write_mask) {
		case 0x1:
			write_mask = 0x3;
			break;
		case 0x2:
			use_tmp = 1;
			write_mask = 0x3;
			break;
		case 0x4:
			write_mask = 0xc;
			break;
		case 0x8:
			write_mask = 0xc;
			use_tmp = 3;
			break;
		}
	}

	/* recompute: the mask may have been widened above */
	lasti = tgsi_last_instruction(write_mask);
	for (i = 0; i <= lasti; i++) {

		if (!(write_mask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		if (singledest) {
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			if (use_tmp) {
				alu.dst.sel = ctx->temp_reg;
				alu.dst.chan = i;
				alu.dst.write = 1;
			}
			/* only the low half of each pair carries the single
			 * 32-bit result; suppress the high-half write */
			if (i == 1 || i == 3)
				alu.dst.write = 0;
		} else
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		alu.op = ctx->inst_info->op;
		if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
			/* DABS is emitted as a plain copy; the abs modifier is
			 * applied to the high dword below */
			r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
		} else if (!swap) {
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
			}
		} else {
			r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
			r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
		}

		/* handle some special cases: modifiers on the odd (high-dword)
		 * channels, where a double's sign/magnitude bits live */
		if (i == 1 || i == 3) {
			switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
			case TGSI_OPCODE_SUB:
				r600_bytecode_src_toggle_neg(&alu.src[1]);
				break;
			case TGSI_OPCODE_DABS:
				r600_bytecode_src_set_abs(&alu.src[0]);
				break;
			default:
				break;
			}
		}
		if (i == lasti) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	if (use_tmp) {
		/* switch back to the caller's original (unwidened) mask */
		write_mask = inst->Dst[0].Register.WriteMask;

		/* move result from temp to dst */
		for (i = 0; i <= lasti; i++) {
			if (!(write_mask & (1 << i)))
				continue;

			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_MOV;
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			alu.src[0].sel = ctx->temp_reg;
			/* use_tmp encodes which temp channel holds the result */
			alu.src[0].chan = use_tmp - 1;
			alu.last = (i == lasti);

			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}
	return 0;
}
2801
2802 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
2803 {
2804 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2805 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2806 /* confirm writemasking */
2807 if ((write_mask & 0x3) != 0x3 &&
2808 (write_mask & 0xc) != 0xc) {
2809 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
2810 return -1;
2811 }
2812 return tgsi_op2_64_params(ctx, false, false);
2813 }
2814
/* 64-bit op2 variant whose TGSI destination is a single 32-bit channel
 * (e.g. double compares); tgsi_op2_64_params handles the widening. */
static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_64_params(ctx, true, false);
}
2819
/* Same as tgsi_op2_64_single_dest, but with src0 and src1 emitted in
 * swapped order. */
static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_64_params(ctx, true, true);
}
2824
2825 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
2826 {
2827 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2828 struct r600_bytecode_alu alu;
2829 int i, j, r;
2830 int lasti = 3;
2831 int tmp = r600_get_temp(ctx);
2832
2833 for (i = 0; i < lasti + 1; i++) {
2834
2835 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2836 alu.op = ctx->inst_info->op;
2837 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2838 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
2839 }
2840
2841 if (inst->Dst[0].Register.WriteMask & (1 << i))
2842 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2843 else
2844 alu.dst.sel = tmp;
2845
2846 alu.dst.chan = i;
2847 alu.is_op3 = 1;
2848 if (i == lasti) {
2849 alu.last = 1;
2850 }
2851 r = r600_bytecode_add_alu(ctx->bc, &alu);
2852 if (r)
2853 return r;
2854 }
2855 return 0;
2856 }
2857
/* Generic per-channel emitter for two-operand TGSI ALU instructions.
 *
 * swap:       emit with src0/src1 exchanged (for ops the hardware only
 *             has in one operand order).
 * trans_only: the op runs only in the trans (scalar) unit, so every
 *             emitted instruction must close its own ALU group
 *             (alu.last on each one).
 */
static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	unsigned write_mask = inst->Dst[0].Register.WriteMask;
	int i, j, r, lasti = tgsi_last_instruction(write_mask);
	/* use temp register if trans_only and more than one dst component */
	int use_tmp = trans_only && (write_mask ^ (1 << lasti));

	for (i = 0; i <= lasti; i++) {
		if (!(write_mask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		if (use_tmp) {
			/* stage in a temp so later channels don't read source
			 * components already overwritten when dst aliases src */
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			alu.dst.write = 1;
		} else
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

		alu.op = ctx->inst_info->op;
		if (!swap) {
			for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
				r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
			}
		} else {
			r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
			r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
		}
		/* handle some special cases */
		switch (inst->Instruction.Opcode) {
		case TGSI_OPCODE_SUB:
			/* a - b is emitted as a + (-b) */
			r600_bytecode_src_toggle_neg(&alu.src[1]);
			break;
		case TGSI_OPCODE_ABS:
			/* ABS is a MOV with the abs source modifier */
			r600_bytecode_src_set_abs(&alu.src[0]);
			break;
		default:
			break;
		}
		if (i == lasti || trans_only) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	if (use_tmp) {
		/* move result from temp to dst */
		for (i = 0; i <= lasti; i++) {
			if (!(write_mask & (1 << i)))
				continue;

			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_MOV;
			tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = i;
			alu.last = (i == lasti);

			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}
	return 0;
}
2927
/* Plain two-operand ALU op: natural operand order, vector-capable. */
static int tgsi_op2(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0, 0);
}
2932
/* Two-operand ALU op emitted with src0/src1 exchanged. */
static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 1, 0);
}
2937
/* Two-operand ALU op restricted to the trans (scalar) unit. */
static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
{
	return tgsi_op2_s(ctx, 0, 1);
}
2942
2943 static int tgsi_ineg(struct r600_shader_ctx *ctx)
2944 {
2945 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2946 struct r600_bytecode_alu alu;
2947 int i, r;
2948 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2949
2950 for (i = 0; i < lasti + 1; i++) {
2951
2952 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2953 continue;
2954 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2955 alu.op = ctx->inst_info->op;
2956
2957 alu.src[0].sel = V_SQ_ALU_SRC_0;
2958
2959 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2960
2961 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2962
2963 if (i == lasti) {
2964 alu.last = 1;
2965 }
2966 r = r600_bytecode_add_alu(ctx->bc, &alu);
2967 if (r)
2968 return r;
2969 }
2970 return 0;
2971
2972 }
2973
2974 static int tgsi_dneg(struct r600_shader_ctx *ctx)
2975 {
2976 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2977 struct r600_bytecode_alu alu;
2978 int i, r;
2979 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2980
2981 for (i = 0; i < lasti + 1; i++) {
2982
2983 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2984 continue;
2985 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2986 alu.op = ALU_OP1_MOV;
2987
2988 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2989
2990 if (i == 1 || i == 3)
2991 r600_bytecode_src_toggle_neg(&alu.src[0]);
2992 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2993
2994 if (i == lasti) {
2995 alu.last = 1;
2996 }
2997 r = r600_bytecode_add_alu(ctx->bc, &alu);
2998 if (r)
2999 return r;
3000 }
3001 return 0;
3002
3003 }
3004
/* DFRACEXP: split a double into fractional part (dst0, a double) and
 * exponent (dst1, an integer). The raw op is emitted across all four
 * temp channels, then the relevant channels are moved to the two TGSI
 * destinations. */
static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	unsigned write_mask = inst->Dst[0].Register.WriteMask;
	int i, j, r;
	/* dst0 may target either the xy or the zw channel pair */
	int firsti = write_mask == 0xc ? 2 : 0;

	/* emit the op into all four channels of the temp register */
	for (i = 0; i <= 3; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;

		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
		}

		if (i == 3)
			alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* MOV first two channels to writemask dst0 */
	for (i = 0; i <= 1; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		/* the double fraction result sits in temp channels 2 and 3 */
		alu.src[0].chan = i + 2;
		alu.src[0].sel = ctx->temp_reg;

		tgsi_dst(ctx, &inst->Dst[0], firsti + i, &alu.dst);
		alu.dst.write = (inst->Dst[0].Register.WriteMask >> (firsti + i)) & 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	for (i = 0; i <= 3; i++) {
		if (inst->Dst[1].Register.WriteMask & (1 << i)) {
			/* MOV third channels to writemask dst1 */
			/* NOTE(review): the comment above says "third" but temp
			 * channel 1 is read — presumably the exponent lands
			 * there; confirm against the ISA documentation */
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_MOV;
			alu.src[0].chan = 1;
			alu.src[0].sel = ctx->temp_reg;

			tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
			/* only the first requested dst1 channel is written */
			break;
		}
	}
	return 0;
}
3065
3066
/* I2D/U2D: convert 32-bit (u)ints to doubles.  Each int is first
 * converted to float into the temp register, then widened with
 * FLT32_TO_FLT64; the second dword of each double pairs the converted
 * float (even channel) with a literal 0 (odd channel). */
static int egcm_int_to_double(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, r;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);

	assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
		inst->Instruction.Opcode == TGSI_OPCODE_U2D);

	/* (u)int -> float, one temp channel per source double */
	for (i = 0; i <= (lasti+1)/2; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;

		r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* widen to fp64: even dest channels feed the converted float,
	 * odd ones feed a 0 literal */
	for (i = 0; i <= lasti; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_FLT32_TO_FLT64;

		alu.src[0].chan = i/2;
		if (i%2 == 0)
			alu.src[0].sel = ctx->temp_reg;
		else {
			alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
			alu.src[0].value = 0x0;
		}
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.last = i == lasti;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}
3113
/* D2I/D2U: convert doubles to 32-bit (u)ints.  Each double is first
 * narrowed with FLT64_TO_FLT32 (sources use the fp64 channel swap),
 * then the narrowed float is converted with the integer op. */
static int egcm_double_to_int(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, r;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);

	assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
		inst->Instruction.Opcode == TGSI_OPCODE_D2U);

	/* narrow each double; only the even temp channels keep a result */
	for (i = 0; i <= lasti; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_FLT64_TO_FLT32;

		r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i));
		alu.dst.chan = i;
		alu.dst.sel = ctx->temp_reg;
		alu.dst.write = i%2 == 0;
		alu.last = i == lasti;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* float -> (u)int of each narrowed value (even temp channels) */
	for (i = 0; i <= (lasti+1)/2; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;

		alu.src[0].chan = i*2;
		alu.src[0].sel = ctx->temp_reg;
		/* NOTE(review): every iteration targets dst channel 0 —
		 * for a dvec2 source the second result would clobber the
		 * first.  'i' looks intended instead of '0'; verify
		 * against the D2I/D2U paths before changing. */
		tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}
3155
/* Cayman double-precision scalar ops (e.g. DRSQ/DSQRT): the op runs in
 * slots 0..2, feeding the double's two dwords as src0/src1 (channel 1
 * then channel 0); the 64-bit result lands in temp.xy and is then
 * replicated to the enabled destination channels. */
static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int i, r;
	struct r600_bytecode_alu alu;
	int last_slot = 3;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
	int t1 = ctx->temp_reg;

	/* these have to write the result to X/Y by the looks of it */
	for (i = 0 ; i < last_slot; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;

		/* should only be one src regs */
		assert (inst->Instruction.NumSrcRegs == 1);

		/* double operand: src0 = channel 1, src1 = channel 0 */
		r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
		r600_bytecode_src(&alu.src[1], &ctx->src[0], 0);

		/* RSQ should take the absolute value of src */
		if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
		    ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT) {
			r600_bytecode_src_set_abs(&alu.src[1]);
		}
		alu.dst.sel = t1;
		alu.dst.chan = i;
		alu.dst.write = (i == 0 || i == 1);

		if (ctx->bc->chip_class != CAYMAN || i == last_slot - 1)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* fan temp.xy out: even dest channels take .x, odd take .y */
	for (i = 0 ; i <= lasti; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		alu.src[0].sel = t1;
		alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.dst.write = 1;
		if (i == lasti)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
3209
3210 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
3211 {
3212 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3213 int i, j, r;
3214 struct r600_bytecode_alu alu;
3215 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3216
3217 for (i = 0 ; i < last_slot; i++) {
3218 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3219 alu.op = ctx->inst_info->op;
3220 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3221 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
3222
3223 /* RSQ should take the absolute value of src */
3224 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
3225 r600_bytecode_src_set_abs(&alu.src[j]);
3226 }
3227 }
3228 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3229 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3230
3231 if (i == last_slot - 1)
3232 alu.last = 1;
3233 r = r600_bytecode_add_alu(ctx->bc, &alu);
3234 if (r)
3235 return r;
3236 }
3237 return 0;
3238 }
3239
3240 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
3241 {
3242 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3243 int i, j, k, r;
3244 struct r600_bytecode_alu alu;
3245 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3246 int t1 = ctx->temp_reg;
3247
3248 for (k = 0; k <= lasti; k++) {
3249 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
3250 continue;
3251
3252 for (i = 0 ; i < 4; i++) {
3253 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3254 alu.op = ctx->inst_info->op;
3255 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3256 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
3257 }
3258 alu.dst.sel = t1;
3259 alu.dst.chan = i;
3260 alu.dst.write = (i == k);
3261 if (i == 3)
3262 alu.last = 1;
3263 r = r600_bytecode_add_alu(ctx->bc, &alu);
3264 if (r)
3265 return r;
3266 }
3267 }
3268
3269 for (i = 0 ; i <= lasti; i++) {
3270 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3271 continue;
3272 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3273 alu.op = ALU_OP1_MOV;
3274 alu.src[0].sel = t1;
3275 alu.src[0].chan = i;
3276 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3277 alu.dst.write = 1;
3278 if (i == lasti)
3279 alu.last = 1;
3280 r = r600_bytecode_add_alu(ctx->bc, &alu);
3281 if (r)
3282 return r;
3283 }
3284
3285 return 0;
3286 }
3287
3288
3289 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
3290 {
3291 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3292 int i, j, k, r;
3293 struct r600_bytecode_alu alu;
3294 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3295 int t1 = ctx->temp_reg;
3296
3297 for (k = 0; k < 2; k++) {
3298 if (!(inst->Dst[0].Register.WriteMask & (0x3 << (k * 2))))
3299 continue;
3300
3301 for (i = 0; i < 4; i++) {
3302 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3303 alu.op = ctx->inst_info->op;
3304 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3305 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));;
3306 }
3307 alu.dst.sel = t1;
3308 alu.dst.chan = i;
3309 alu.dst.write = 1;
3310 if (i == 3)
3311 alu.last = 1;
3312 r = r600_bytecode_add_alu(ctx->bc, &alu);
3313 if (r)
3314 return r;
3315 }
3316 }
3317
3318 for (i = 0; i <= lasti; i++) {
3319 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3320 continue;
3321 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3322 alu.op = ALU_OP1_MOV;
3323 alu.src[0].sel = t1;
3324 alu.src[0].chan = i;
3325 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3326 alu.dst.write = 1;
3327 if (i == lasti)
3328 alu.last = 1;
3329 r = r600_bytecode_add_alu(ctx->bc, &alu);
3330 if (r)
3331 return r;
3332 }
3333
3334 return 0;
3335 }
3336
3337 /*
3338 * r600 - trunc to -PI..PI range
3339 * r700 - normalize by dividing by 2PI
3340 * see fdo bug 27901
3341 */
3342 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
3343 {
3344 static float half_inv_pi = 1.0 /(3.1415926535 * 2);
3345 static float double_pi = 3.1415926535 * 2;
3346 static float neg_pi = -3.1415926535;
3347
3348 int r;
3349 struct r600_bytecode_alu alu;
3350
3351 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3352 alu.op = ALU_OP3_MULADD;
3353 alu.is_op3 = 1;
3354
3355 alu.dst.chan = 0;
3356 alu.dst.sel = ctx->temp_reg;
3357 alu.dst.write = 1;
3358
3359 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3360
3361 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3362 alu.src[1].chan = 0;
3363 alu.src[1].value = *(uint32_t *)&half_inv_pi;
3364 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3365 alu.src[2].chan = 0;
3366 alu.last = 1;
3367 r = r600_bytecode_add_alu(ctx->bc, &alu);
3368 if (r)
3369 return r;
3370
3371 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3372 alu.op = ALU_OP1_FRACT;
3373
3374 alu.dst.chan = 0;
3375 alu.dst.sel = ctx->temp_reg;
3376 alu.dst.write = 1;
3377
3378 alu.src[0].sel = ctx->temp_reg;
3379 alu.src[0].chan = 0;
3380 alu.last = 1;
3381 r = r600_bytecode_add_alu(ctx->bc, &alu);
3382 if (r)
3383 return r;
3384
3385 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3386 alu.op = ALU_OP3_MULADD;
3387 alu.is_op3 = 1;
3388
3389 alu.dst.chan = 0;
3390 alu.dst.sel = ctx->temp_reg;
3391 alu.dst.write = 1;
3392
3393 alu.src[0].sel = ctx->temp_reg;
3394 alu.src[0].chan = 0;
3395
3396 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3397 alu.src[1].chan = 0;
3398 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
3399 alu.src[2].chan = 0;
3400
3401 if (ctx->bc->chip_class == R600) {
3402 alu.src[1].value = *(uint32_t *)&double_pi;
3403 alu.src[2].value = *(uint32_t *)&neg_pi;
3404 } else {
3405 alu.src[1].sel = V_SQ_ALU_SRC_1;
3406 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3407 alu.src[2].neg = 1;
3408 }
3409
3410 alu.last = 1;
3411 r = r600_bytecode_add_alu(ctx->bc, &alu);
3412 if (r)
3413 return r;
3414 return 0;
3415 }
3416
3417 static int cayman_trig(struct r600_shader_ctx *ctx)
3418 {
3419 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3420 struct r600_bytecode_alu alu;
3421 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3422 int i, r;
3423
3424 r = tgsi_setup_trig(ctx);
3425 if (r)
3426 return r;
3427
3428
3429 for (i = 0; i < last_slot; i++) {
3430 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3431 alu.op = ctx->inst_info->op;
3432 alu.dst.chan = i;
3433
3434 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3435 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3436
3437 alu.src[0].sel = ctx->temp_reg;
3438 alu.src[0].chan = 0;
3439 if (i == last_slot - 1)
3440 alu.last = 1;
3441 r = r600_bytecode_add_alu(ctx->bc, &alu);
3442 if (r)
3443 return r;
3444 }
3445 return 0;
3446 }
3447
3448 static int tgsi_trig(struct r600_shader_ctx *ctx)
3449 {
3450 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3451 struct r600_bytecode_alu alu;
3452 int i, r;
3453 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3454
3455 r = tgsi_setup_trig(ctx);
3456 if (r)
3457 return r;
3458
3459 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3460 alu.op = ctx->inst_info->op;
3461 alu.dst.chan = 0;
3462 alu.dst.sel = ctx->temp_reg;
3463 alu.dst.write = 1;
3464
3465 alu.src[0].sel = ctx->temp_reg;
3466 alu.src[0].chan = 0;
3467 alu.last = 1;
3468 r = r600_bytecode_add_alu(ctx->bc, &alu);
3469 if (r)
3470 return r;
3471
3472 /* replicate result */
3473 for (i = 0; i < lasti + 1; i++) {
3474 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3475 continue;
3476
3477 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3478 alu.op = ALU_OP1_MOV;
3479
3480 alu.src[0].sel = ctx->temp_reg;
3481 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3482 if (i == lasti)
3483 alu.last = 1;
3484 r = r600_bytecode_add_alu(ctx->bc, &alu);
3485 if (r)
3486 return r;
3487 }
3488 return 0;
3489 }
3490
/* SCS: dst = (cos(src.x), sin(src.x), 0.0, 1.0), each component gated
 * by the destination write mask.  COS/SIN read the angle pre-reduced
 * into temp.x by tgsi_setup_trig(); on Cayman the t-slot-only trig ops
 * are replicated over three vector slots with only the target slot
 * writing. */
static int tgsi_scs(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, r;

	/* We'll only need the trig stuff if we are going to write to the
	 * X or Y components of the destination vector.
	 */
	if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
		r = tgsi_setup_trig(ctx);
		if (r)
			return r;
	}

	/* dst.x = COS */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0 ; i < 3; i++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_COS;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);

				/* only slot 0 writes dst.x */
				if (i == 0)
					alu.dst.write = 1;
				else
					alu.dst.write = 0;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				if (i == 2)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_COS;
			tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);

			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.y = SIN */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0 ; i < 3; i++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_SIN;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
				/* only slot 1 writes dst.y */
				if (i == 1)
					alu.dst.write = 1;
				else
					alu.dst.write = 0;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				if (i == 2)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_SIN;
			tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);

			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.z = 0.0; */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_MOV;

		tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);

		alu.src[0].sel = V_SQ_ALU_SRC_0;
		alu.src[0].chan = 0;

		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	/* dst.w = 1.0; */
	if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_MOV;

		tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);

		alu.src[0].sel = V_SQ_ALU_SRC_1;
		alu.src[0].chan = 0;

		alu.last = 1;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}
3611
3612 static int tgsi_kill(struct r600_shader_ctx *ctx)
3613 {
3614 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3615 struct r600_bytecode_alu alu;
3616 int i, r;
3617
3618 for (i = 0; i < 4; i++) {
3619 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3620 alu.op = ctx->inst_info->op;
3621
3622 alu.dst.chan = i;
3623
3624 alu.src[0].sel = V_SQ_ALU_SRC_0;
3625
3626 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
3627 alu.src[1].sel = V_SQ_ALU_SRC_1;
3628 alu.src[1].neg = 1;
3629 } else {
3630 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3631 }
3632 if (i == 3) {
3633 alu.last = 1;
3634 }
3635 r = r600_bytecode_add_alu(ctx->bc, &alu);
3636 if (r)
3637 return r;
3638 }
3639
3640 /* kill must be last in ALU */
3641 ctx->bc->force_add_cf = 1;
3642 ctx->shader->uses_kill = TRUE;
3643 return 0;
3644 }
3645
/* LIT: lighting coefficients
 *   dst.x = 1.0
 *   dst.y = max(src.x, 0.0)
 *   dst.z = exp(MUL_LIT(log_clamped(max(src.y, 0.0)), src.w, src.x))
 *   dst.w = 1.0
 * The expensive .z path is only emitted when the write mask asks for
 * it; Cayman replicates the t-slot-only LOG/EXP ops over 3 slots. */
static int tgsi_lit(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r;

	/* tmp.x = max(src.y, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_MAX;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
	alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
	alu.src[1].chan = 1;

	alu.dst.sel = ctx->temp_reg;
	alu.dst.chan = 0;
	alu.dst.write = 1;

	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	if (inst->Dst[0].Register.WriteMask & (1 << 2))
	{
		int chan;
		int sel;
		int i;

		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0; i < 3; i++) {
				/* tmp.z = log(tmp.x) */
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_LOG_CLAMPED;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				alu.dst.sel = ctx->temp_reg;
				alu.dst.chan = i;
				/* only slot 2 keeps the result */
				if (i == 2) {
					alu.dst.write = 1;
					alu.last = 1;
				} else
					alu.dst.write = 0;

				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			/* tmp.z = log(tmp.x) */
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_LOG_CLAMPED;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = 2;
			alu.dst.write = 1;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}

		/* remember where the log result went for the MUL_LIT below */
		chan = alu.dst.chan;
		sel = alu.dst.sel;

		/* tmp.x = amd MUL_LIT(tmp.z, src.w, src.x ) */
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP3_MUL_LIT;
		alu.src[0].sel = sel;
		alu.src[0].chan = chan;
		r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
		r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
		alu.dst.sel = ctx->temp_reg;
		alu.dst.chan = 0;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		if (ctx->bc->chip_class == CAYMAN) {
			for (i = 0; i < 3; i++) {
				/* dst.z = exp(tmp.x) */
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_EXP_IEEE;
				alu.src[0].sel = ctx->temp_reg;
				alu.src[0].chan = 0;
				tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
				if (i == 2) {
					alu.dst.write = 1;
					alu.last = 1;
				} else
					alu.dst.write = 0;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
		} else {
			/* dst.z = exp(tmp.x) */
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_EXP_IEEE;
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 0;
			tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* dst.x, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
	alu.src[0].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.y = max(src.x, 0.0) */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_MAX;
	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
	alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
	alu.src[1].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	/* dst.w, <- 1.0 */
	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = V_SQ_ALU_SRC_1;
	alu.src[0].chan = 0;
	tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
	alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
	alu.last = 1;
	r = r600_bytecode_add_alu(ctx->bc, &alu);
	if (r)
		return r;

	return 0;
}
3795
3796 static int tgsi_rsq(struct r600_shader_ctx *ctx)
3797 {
3798 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3799 struct r600_bytecode_alu alu;
3800 int i, r;
3801
3802 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3803
3804 /* XXX:
3805 * For state trackers other than OpenGL, we'll want to use
3806 * _RECIPSQRT_IEEE instead.
3807 */
3808 alu.op = ALU_OP1_RECIPSQRT_CLAMPED;
3809
3810 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3811 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3812 r600_bytecode_src_set_abs(&alu.src[i]);
3813 }
3814 alu.dst.sel = ctx->temp_reg;
3815 alu.dst.write = 1;
3816 alu.last = 1;
3817 r = r600_bytecode_add_alu(ctx->bc, &alu);
3818 if (r)
3819 return r;
3820 /* replicate result */
3821 return tgsi_helper_tempx_replicate(ctx);
3822 }
3823
3824 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
3825 {
3826 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3827 struct r600_bytecode_alu alu;
3828 int i, r;
3829
3830 for (i = 0; i < 4; i++) {
3831 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3832 alu.src[0].sel = ctx->temp_reg;
3833 alu.op = ALU_OP1_MOV;
3834 alu.dst.chan = i;
3835 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3836 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3837 if (i == 3)
3838 alu.last = 1;
3839 r = r600_bytecode_add_alu(ctx->bc, &alu);
3840 if (r)
3841 return r;
3842 }
3843 return 0;
3844 }
3845
3846 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
3847 {
3848 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3849 struct r600_bytecode_alu alu;
3850 int i, r;
3851
3852 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3853 alu.op = ctx->inst_info->op;
3854 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3855 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3856 }
3857 alu.dst.sel = ctx->temp_reg;
3858 alu.dst.write = 1;
3859 alu.last = 1;
3860 r = r600_bytecode_add_alu(ctx->bc, &alu);
3861 if (r)
3862 return r;
3863 /* replicate result */
3864 return tgsi_helper_tempx_replicate(ctx);
3865 }
3866
3867 static int cayman_pow(struct r600_shader_ctx *ctx)
3868 {
3869 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3870 int i, r;
3871 struct r600_bytecode_alu alu;
3872 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3873
3874 for (i = 0; i < 3; i++) {
3875 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3876 alu.op = ALU_OP1_LOG_IEEE;
3877 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3878 alu.dst.sel = ctx->temp_reg;
3879 alu.dst.chan = i;
3880 alu.dst.write = 1;
3881 if (i == 2)
3882 alu.last = 1;
3883 r = r600_bytecode_add_alu(ctx->bc, &alu);
3884 if (r)
3885 return r;
3886 }
3887
3888 /* b * LOG2(a) */
3889 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3890 alu.op = ALU_OP2_MUL;
3891 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3892 alu.src[1].sel = ctx->temp_reg;
3893 alu.dst.sel = ctx->temp_reg;
3894 alu.dst.write = 1;
3895 alu.last = 1;
3896 r = r600_bytecode_add_alu(ctx->bc, &alu);
3897 if (r)
3898 return r;
3899
3900 for (i = 0; i < last_slot; i++) {
3901 /* POW(a,b) = EXP2(b * LOG2(a))*/
3902 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3903 alu.op = ALU_OP1_EXP_IEEE;
3904 alu.src[0].sel = ctx->temp_reg;
3905
3906 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3907 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3908 if (i == last_slot - 1)
3909 alu.last = 1;
3910 r = r600_bytecode_add_alu(ctx->bc, &alu);
3911 if (r)
3912 return r;
3913 }
3914 return 0;
3915 }
3916
3917 static int tgsi_pow(struct r600_shader_ctx *ctx)
3918 {
3919 struct r600_bytecode_alu alu;
3920 int r;
3921
3922 /* LOG2(a) */
3923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3924 alu.op = ALU_OP1_LOG_IEEE;
3925 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3926 alu.dst.sel = ctx->temp_reg;
3927 alu.dst.write = 1;
3928 alu.last = 1;
3929 r = r600_bytecode_add_alu(ctx->bc, &alu);
3930 if (r)
3931 return r;
3932 /* b * LOG2(a) */
3933 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3934 alu.op = ALU_OP2_MUL;
3935 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3936 alu.src[1].sel = ctx->temp_reg;
3937 alu.dst.sel = ctx->temp_reg;
3938 alu.dst.write = 1;
3939 alu.last = 1;
3940 r = r600_bytecode_add_alu(ctx->bc, &alu);
3941 if (r)
3942 return r;
3943 /* POW(a,b) = EXP2(b * LOG2(a))*/
3944 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3945 alu.op = ALU_OP1_EXP_IEEE;
3946 alu.src[0].sel = ctx->temp_reg;
3947 alu.dst.sel = ctx->temp_reg;
3948 alu.dst.write = 1;
3949 alu.last = 1;
3950 r = r600_bytecode_add_alu(ctx->bc, &alu);
3951 if (r)
3952 return r;
3953 return tgsi_helper_tempx_replicate(ctx);
3954 }
3955
3956 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
3957 {
3958 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3959 struct r600_bytecode_alu alu;
3960 int i, r, j;
3961 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3962 int tmp0 = ctx->temp_reg;
3963 int tmp1 = r600_get_temp(ctx);
3964 int tmp2 = r600_get_temp(ctx);
3965 int tmp3 = r600_get_temp(ctx);
3966 /* Unsigned path:
3967 *
3968 * we need to represent src1 as src2*q + r, where q - quotient, r - remainder
3969 *
3970 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
3971 * 2. tmp0.z = lo (tmp0.x * src2)
3972 * 3. tmp0.w = -tmp0.z
3973 * 4. tmp0.y = hi (tmp0.x * src2)
3974 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
3975 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
3976 * 7. tmp1.x = tmp0.x - tmp0.w
3977 * 8. tmp1.y = tmp0.x + tmp0.w
3978 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
3979 * 10. tmp0.z = hi(tmp0.x * src1) = q
3980 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
3981 *
3982 * 12. tmp0.w = src1 - tmp0.y = r
3983 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
3984 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
3985 *
3986 * if DIV
3987 *
3988 * 15. tmp1.z = tmp0.z + 1 = q + 1
3989 * 16. tmp1.w = tmp0.z - 1 = q - 1
3990 *
3991 * else MOD
3992 *
3993 * 15. tmp1.z = tmp0.w - src2 = r - src2
3994 * 16. tmp1.w = tmp0.w + src2 = r + src2
3995 *
3996 * endif
3997 *
3998 * 17. tmp1.x = tmp1.x & tmp1.y
3999 *
4000 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
4001 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
4002 *
4003 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
4004 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
4005 *
4006 * Signed path:
4007 *
4008 * Same as unsigned, using abs values of the operands,
4009 * and fixing the sign of the result in the end.
4010 */
4011
4012 for (i = 0; i < 4; i++) {
4013 if (!(write_mask & (1<<i)))
4014 continue;
4015
4016 if (signed_op) {
4017
4018 /* tmp2.x = -src0 */
4019 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4020 alu.op = ALU_OP2_SUB_INT;
4021
4022 alu.dst.sel = tmp2;
4023 alu.dst.chan = 0;
4024 alu.dst.write = 1;
4025
4026 alu.src[0].sel = V_SQ_ALU_SRC_0;
4027
4028 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4029
4030 alu.last = 1;
4031 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4032 return r;
4033
4034 /* tmp2.y = -src1 */
4035 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4036 alu.op = ALU_OP2_SUB_INT;
4037
4038 alu.dst.sel = tmp2;
4039 alu.dst.chan = 1;
4040 alu.dst.write = 1;
4041
4042 alu.src[0].sel = V_SQ_ALU_SRC_0;
4043
4044 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4045
4046 alu.last = 1;
4047 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4048 return r;
4049
4050 /* tmp2.z sign bit is set if src0 and src2 signs are different */
4051 /* it will be a sign of the quotient */
4052 if (!mod) {
4053
4054 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4055 alu.op = ALU_OP2_XOR_INT;
4056
4057 alu.dst.sel = tmp2;
4058 alu.dst.chan = 2;
4059 alu.dst.write = 1;
4060
4061 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4062 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4063
4064 alu.last = 1;
4065 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4066 return r;
4067 }
4068
4069 /* tmp2.x = |src0| */
4070 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4071 alu.op = ALU_OP3_CNDGE_INT;
4072 alu.is_op3 = 1;
4073
4074 alu.dst.sel = tmp2;
4075 alu.dst.chan = 0;
4076 alu.dst.write = 1;
4077
4078 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4079 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4080 alu.src[2].sel = tmp2;
4081 alu.src[2].chan = 0;
4082
4083 alu.last = 1;
4084 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4085 return r;
4086
4087 /* tmp2.y = |src1| */
4088 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4089 alu.op = ALU_OP3_CNDGE_INT;
4090 alu.is_op3 = 1;
4091
4092 alu.dst.sel = tmp2;
4093 alu.dst.chan = 1;
4094 alu.dst.write = 1;
4095
4096 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4097 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4098 alu.src[2].sel = tmp2;
4099 alu.src[2].chan = 1;
4100
4101 alu.last = 1;
4102 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4103 return r;
4104
4105 }
4106
4107 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
4108 if (ctx->bc->chip_class == CAYMAN) {
4109 /* tmp3.x = u2f(src2) */
4110 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4111 alu.op = ALU_OP1_UINT_TO_FLT;
4112
4113 alu.dst.sel = tmp3;
4114 alu.dst.chan = 0;
4115 alu.dst.write = 1;
4116
4117 if (signed_op) {
4118 alu.src[0].sel = tmp2;
4119 alu.src[0].chan = 1;
4120 } else {
4121 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4122 }
4123
4124 alu.last = 1;
4125 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4126 return r;
4127
4128 /* tmp0.x = recip(tmp3.x) */
4129 for (j = 0 ; j < 3; j++) {
4130 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4131 alu.op = ALU_OP1_RECIP_IEEE;
4132
4133 alu.dst.sel = tmp0;
4134 alu.dst.chan = j;
4135 alu.dst.write = (j == 0);
4136
4137 alu.src[0].sel = tmp3;
4138 alu.src[0].chan = 0;
4139
4140 if (j == 2)
4141 alu.last = 1;
4142 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4143 return r;
4144 }
4145
4146 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4147 alu.op = ALU_OP2_MUL;
4148
4149 alu.src[0].sel = tmp0;
4150 alu.src[0].chan = 0;
4151
4152 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4153 alu.src[1].value = 0x4f800000;
4154
4155 alu.dst.sel = tmp3;
4156 alu.dst.write = 1;
4157 alu.last = 1;
4158 r = r600_bytecode_add_alu(ctx->bc, &alu);
4159 if (r)
4160 return r;
4161
4162 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4163 alu.op = ALU_OP1_FLT_TO_UINT;
4164
4165 alu.dst.sel = tmp0;
4166 alu.dst.chan = 0;
4167 alu.dst.write = 1;
4168
4169 alu.src[0].sel = tmp3;
4170 alu.src[0].chan = 0;
4171
4172 alu.last = 1;
4173 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4174 return r;
4175
4176 } else {
4177 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4178 alu.op = ALU_OP1_RECIP_UINT;
4179
4180 alu.dst.sel = tmp0;
4181 alu.dst.chan = 0;
4182 alu.dst.write = 1;
4183
4184 if (signed_op) {
4185 alu.src[0].sel = tmp2;
4186 alu.src[0].chan = 1;
4187 } else {
4188 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4189 }
4190
4191 alu.last = 1;
4192 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4193 return r;
4194 }
4195
4196 /* 2. tmp0.z = lo (tmp0.x * src2) */
4197 if (ctx->bc->chip_class == CAYMAN) {
4198 for (j = 0 ; j < 4; j++) {
4199 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4200 alu.op = ALU_OP2_MULLO_UINT;
4201
4202 alu.dst.sel = tmp0;
4203 alu.dst.chan = j;
4204 alu.dst.write = (j == 2);
4205
4206 alu.src[0].sel = tmp0;
4207 alu.src[0].chan = 0;
4208 if (signed_op) {
4209 alu.src[1].sel = tmp2;
4210 alu.src[1].chan = 1;
4211 } else {
4212 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4213 }
4214
4215 alu.last = (j == 3);
4216 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4217 return r;
4218 }
4219 } else {
4220 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4221 alu.op = ALU_OP2_MULLO_UINT;
4222
4223 alu.dst.sel = tmp0;
4224 alu.dst.chan = 2;
4225 alu.dst.write = 1;
4226
4227 alu.src[0].sel = tmp0;
4228 alu.src[0].chan = 0;
4229 if (signed_op) {
4230 alu.src[1].sel = tmp2;
4231 alu.src[1].chan = 1;
4232 } else {
4233 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4234 }
4235
4236 alu.last = 1;
4237 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4238 return r;
4239 }
4240
4241 /* 3. tmp0.w = -tmp0.z */
4242 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4243 alu.op = ALU_OP2_SUB_INT;
4244
4245 alu.dst.sel = tmp0;
4246 alu.dst.chan = 3;
4247 alu.dst.write = 1;
4248
4249 alu.src[0].sel = V_SQ_ALU_SRC_0;
4250 alu.src[1].sel = tmp0;
4251 alu.src[1].chan = 2;
4252
4253 alu.last = 1;
4254 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4255 return r;
4256
4257 /* 4. tmp0.y = hi (tmp0.x * src2) */
4258 if (ctx->bc->chip_class == CAYMAN) {
4259 for (j = 0 ; j < 4; j++) {
4260 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4261 alu.op = ALU_OP2_MULHI_UINT;
4262
4263 alu.dst.sel = tmp0;
4264 alu.dst.chan = j;
4265 alu.dst.write = (j == 1);
4266
4267 alu.src[0].sel = tmp0;
4268 alu.src[0].chan = 0;
4269
4270 if (signed_op) {
4271 alu.src[1].sel = tmp2;
4272 alu.src[1].chan = 1;
4273 } else {
4274 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4275 }
4276 alu.last = (j == 3);
4277 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4278 return r;
4279 }
4280 } else {
4281 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4282 alu.op = ALU_OP2_MULHI_UINT;
4283
4284 alu.dst.sel = tmp0;
4285 alu.dst.chan = 1;
4286 alu.dst.write = 1;
4287
4288 alu.src[0].sel = tmp0;
4289 alu.src[0].chan = 0;
4290
4291 if (signed_op) {
4292 alu.src[1].sel = tmp2;
4293 alu.src[1].chan = 1;
4294 } else {
4295 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4296 }
4297
4298 alu.last = 1;
4299 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4300 return r;
4301 }
4302
4303 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */
4304 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4305 alu.op = ALU_OP3_CNDE_INT;
4306 alu.is_op3 = 1;
4307
4308 alu.dst.sel = tmp0;
4309 alu.dst.chan = 2;
4310 alu.dst.write = 1;
4311
4312 alu.src[0].sel = tmp0;
4313 alu.src[0].chan = 1;
4314 alu.src[1].sel = tmp0;
4315 alu.src[1].chan = 3;
4316 alu.src[2].sel = tmp0;
4317 alu.src[2].chan = 2;
4318
4319 alu.last = 1;
4320 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4321 return r;
4322
4323 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
4324 if (ctx->bc->chip_class == CAYMAN) {
4325 for (j = 0 ; j < 4; j++) {
4326 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4327 alu.op = ALU_OP2_MULHI_UINT;
4328
4329 alu.dst.sel = tmp0;
4330 alu.dst.chan = j;
4331 alu.dst.write = (j == 3);
4332
4333 alu.src[0].sel = tmp0;
4334 alu.src[0].chan = 2;
4335
4336 alu.src[1].sel = tmp0;
4337 alu.src[1].chan = 0;
4338
4339 alu.last = (j == 3);
4340 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4341 return r;
4342 }
4343 } else {
4344 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4345 alu.op = ALU_OP2_MULHI_UINT;
4346
4347 alu.dst.sel = tmp0;
4348 alu.dst.chan = 3;
4349 alu.dst.write = 1;
4350
4351 alu.src[0].sel = tmp0;
4352 alu.src[0].chan = 2;
4353
4354 alu.src[1].sel = tmp0;
4355 alu.src[1].chan = 0;
4356
4357 alu.last = 1;
4358 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4359 return r;
4360 }
4361
4362 /* 7. tmp1.x = tmp0.x - tmp0.w */
4363 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4364 alu.op = ALU_OP2_SUB_INT;
4365
4366 alu.dst.sel = tmp1;
4367 alu.dst.chan = 0;
4368 alu.dst.write = 1;
4369
4370 alu.src[0].sel = tmp0;
4371 alu.src[0].chan = 0;
4372 alu.src[1].sel = tmp0;
4373 alu.src[1].chan = 3;
4374
4375 alu.last = 1;
4376 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4377 return r;
4378
4379 /* 8. tmp1.y = tmp0.x + tmp0.w */
4380 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4381 alu.op = ALU_OP2_ADD_INT;
4382
4383 alu.dst.sel = tmp1;
4384 alu.dst.chan = 1;
4385 alu.dst.write = 1;
4386
4387 alu.src[0].sel = tmp0;
4388 alu.src[0].chan = 0;
4389 alu.src[1].sel = tmp0;
4390 alu.src[1].chan = 3;
4391
4392 alu.last = 1;
4393 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4394 return r;
4395
4396 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
4397 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4398 alu.op = ALU_OP3_CNDE_INT;
4399 alu.is_op3 = 1;
4400
4401 alu.dst.sel = tmp0;
4402 alu.dst.chan = 0;
4403 alu.dst.write = 1;
4404
4405 alu.src[0].sel = tmp0;
4406 alu.src[0].chan = 1;
4407 alu.src[1].sel = tmp1;
4408 alu.src[1].chan = 1;
4409 alu.src[2].sel = tmp1;
4410 alu.src[2].chan = 0;
4411
4412 alu.last = 1;
4413 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4414 return r;
4415
4416 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
4417 if (ctx->bc->chip_class == CAYMAN) {
4418 for (j = 0 ; j < 4; j++) {
4419 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4420 alu.op = ALU_OP2_MULHI_UINT;
4421
4422 alu.dst.sel = tmp0;
4423 alu.dst.chan = j;
4424 alu.dst.write = (j == 2);
4425
4426 alu.src[0].sel = tmp0;
4427 alu.src[0].chan = 0;
4428
4429 if (signed_op) {
4430 alu.src[1].sel = tmp2;
4431 alu.src[1].chan = 0;
4432 } else {
4433 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4434 }
4435
4436 alu.last = (j == 3);
4437 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4438 return r;
4439 }
4440 } else {
4441 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4442 alu.op = ALU_OP2_MULHI_UINT;
4443
4444 alu.dst.sel = tmp0;
4445 alu.dst.chan = 2;
4446 alu.dst.write = 1;
4447
4448 alu.src[0].sel = tmp0;
4449 alu.src[0].chan = 0;
4450
4451 if (signed_op) {
4452 alu.src[1].sel = tmp2;
4453 alu.src[1].chan = 0;
4454 } else {
4455 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4456 }
4457
4458 alu.last = 1;
4459 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4460 return r;
4461 }
4462
4463 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
4464 if (ctx->bc->chip_class == CAYMAN) {
4465 for (j = 0 ; j < 4; j++) {
4466 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4467 alu.op = ALU_OP2_MULLO_UINT;
4468
4469 alu.dst.sel = tmp0;
4470 alu.dst.chan = j;
4471 alu.dst.write = (j == 1);
4472
4473 if (signed_op) {
4474 alu.src[0].sel = tmp2;
4475 alu.src[0].chan = 1;
4476 } else {
4477 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4478 }
4479
4480 alu.src[1].sel = tmp0;
4481 alu.src[1].chan = 2;
4482
4483 alu.last = (j == 3);
4484 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4485 return r;
4486 }
4487 } else {
4488 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4489 alu.op = ALU_OP2_MULLO_UINT;
4490
4491 alu.dst.sel = tmp0;
4492 alu.dst.chan = 1;
4493 alu.dst.write = 1;
4494
4495 if (signed_op) {
4496 alu.src[0].sel = tmp2;
4497 alu.src[0].chan = 1;
4498 } else {
4499 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4500 }
4501
4502 alu.src[1].sel = tmp0;
4503 alu.src[1].chan = 2;
4504
4505 alu.last = 1;
4506 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4507 return r;
4508 }
4509
4510 /* 12. tmp0.w = src1 - tmp0.y = r */
4511 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4512 alu.op = ALU_OP2_SUB_INT;
4513
4514 alu.dst.sel = tmp0;
4515 alu.dst.chan = 3;
4516 alu.dst.write = 1;
4517
4518 if (signed_op) {
4519 alu.src[0].sel = tmp2;
4520 alu.src[0].chan = 0;
4521 } else {
4522 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4523 }
4524
4525 alu.src[1].sel = tmp0;
4526 alu.src[1].chan = 1;
4527
4528 alu.last = 1;
4529 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4530 return r;
4531
4532 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
4533 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4534 alu.op = ALU_OP2_SETGE_UINT;
4535
4536 alu.dst.sel = tmp1;
4537 alu.dst.chan = 0;
4538 alu.dst.write = 1;
4539
4540 alu.src[0].sel = tmp0;
4541 alu.src[0].chan = 3;
4542 if (signed_op) {
4543 alu.src[1].sel = tmp2;
4544 alu.src[1].chan = 1;
4545 } else {
4546 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4547 }
4548
4549 alu.last = 1;
4550 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4551 return r;
4552
4553 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
4554 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4555 alu.op = ALU_OP2_SETGE_UINT;
4556
4557 alu.dst.sel = tmp1;
4558 alu.dst.chan = 1;
4559 alu.dst.write = 1;
4560
4561 if (signed_op) {
4562 alu.src[0].sel = tmp2;
4563 alu.src[0].chan = 0;
4564 } else {
4565 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4566 }
4567
4568 alu.src[1].sel = tmp0;
4569 alu.src[1].chan = 1;
4570
4571 alu.last = 1;
4572 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4573 return r;
4574
4575 if (mod) { /* UMOD */
4576
4577 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
4578 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4579 alu.op = ALU_OP2_SUB_INT;
4580
4581 alu.dst.sel = tmp1;
4582 alu.dst.chan = 2;
4583 alu.dst.write = 1;
4584
4585 alu.src[0].sel = tmp0;
4586 alu.src[0].chan = 3;
4587
4588 if (signed_op) {
4589 alu.src[1].sel = tmp2;
4590 alu.src[1].chan = 1;
4591 } else {
4592 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4593 }
4594
4595 alu.last = 1;
4596 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4597 return r;
4598
4599 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
4600 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4601 alu.op = ALU_OP2_ADD_INT;
4602
4603 alu.dst.sel = tmp1;
4604 alu.dst.chan = 3;
4605 alu.dst.write = 1;
4606
4607 alu.src[0].sel = tmp0;
4608 alu.src[0].chan = 3;
4609 if (signed_op) {
4610 alu.src[1].sel = tmp2;
4611 alu.src[1].chan = 1;
4612 } else {
4613 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4614 }
4615
4616 alu.last = 1;
4617 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4618 return r;
4619
4620 } else { /* UDIV */
4621
4622 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
4623 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4624 alu.op = ALU_OP2_ADD_INT;
4625
4626 alu.dst.sel = tmp1;
4627 alu.dst.chan = 2;
4628 alu.dst.write = 1;
4629
4630 alu.src[0].sel = tmp0;
4631 alu.src[0].chan = 2;
4632 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4633
4634 alu.last = 1;
4635 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4636 return r;
4637
4638 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
4639 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4640 alu.op = ALU_OP2_ADD_INT;
4641
4642 alu.dst.sel = tmp1;
4643 alu.dst.chan = 3;
4644 alu.dst.write = 1;
4645
4646 alu.src[0].sel = tmp0;
4647 alu.src[0].chan = 2;
4648 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
4649
4650 alu.last = 1;
4651 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4652 return r;
4653
4654 }
4655
4656 /* 17. tmp1.x = tmp1.x & tmp1.y */
4657 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4658 alu.op = ALU_OP2_AND_INT;
4659
4660 alu.dst.sel = tmp1;
4661 alu.dst.chan = 0;
4662 alu.dst.write = 1;
4663
4664 alu.src[0].sel = tmp1;
4665 alu.src[0].chan = 0;
4666 alu.src[1].sel = tmp1;
4667 alu.src[1].chan = 1;
4668
4669 alu.last = 1;
4670 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4671 return r;
4672
4673 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
4674 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
4675 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4676 alu.op = ALU_OP3_CNDE_INT;
4677 alu.is_op3 = 1;
4678
4679 alu.dst.sel = tmp0;
4680 alu.dst.chan = 2;
4681 alu.dst.write = 1;
4682
4683 alu.src[0].sel = tmp1;
4684 alu.src[0].chan = 0;
4685 alu.src[1].sel = tmp0;
4686 alu.src[1].chan = mod ? 3 : 2;
4687 alu.src[2].sel = tmp1;
4688 alu.src[2].chan = 2;
4689
4690 alu.last = 1;
4691 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4692 return r;
4693
4694 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
4695 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4696 alu.op = ALU_OP3_CNDE_INT;
4697 alu.is_op3 = 1;
4698
4699 if (signed_op) {
4700 alu.dst.sel = tmp0;
4701 alu.dst.chan = 2;
4702 alu.dst.write = 1;
4703 } else {
4704 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4705 }
4706
4707 alu.src[0].sel = tmp1;
4708 alu.src[0].chan = 1;
4709 alu.src[1].sel = tmp1;
4710 alu.src[1].chan = 3;
4711 alu.src[2].sel = tmp0;
4712 alu.src[2].chan = 2;
4713
4714 alu.last = 1;
4715 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4716 return r;
4717
4718 if (signed_op) {
4719
4720 /* fix the sign of the result */
4721
4722 if (mod) {
4723
4724 /* tmp0.x = -tmp0.z */
4725 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4726 alu.op = ALU_OP2_SUB_INT;
4727
4728 alu.dst.sel = tmp0;
4729 alu.dst.chan = 0;
4730 alu.dst.write = 1;
4731
4732 alu.src[0].sel = V_SQ_ALU_SRC_0;
4733 alu.src[1].sel = tmp0;
4734 alu.src[1].chan = 2;
4735
4736 alu.last = 1;
4737 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4738 return r;
4739
4740 /* sign of the remainder is the same as the sign of src0 */
4741 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
4742 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4743 alu.op = ALU_OP3_CNDGE_INT;
4744 alu.is_op3 = 1;
4745
4746 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4747
4748 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4749 alu.src[1].sel = tmp0;
4750 alu.src[1].chan = 2;
4751 alu.src[2].sel = tmp0;
4752 alu.src[2].chan = 0;
4753
4754 alu.last = 1;
4755 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4756 return r;
4757
4758 } else {
4759
4760 /* tmp0.x = -tmp0.z */
4761 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4762 alu.op = ALU_OP2_SUB_INT;
4763
4764 alu.dst.sel = tmp0;
4765 alu.dst.chan = 0;
4766 alu.dst.write = 1;
4767
4768 alu.src[0].sel = V_SQ_ALU_SRC_0;
4769 alu.src[1].sel = tmp0;
4770 alu.src[1].chan = 2;
4771
4772 alu.last = 1;
4773 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4774 return r;
4775
4776 /* fix the quotient sign (same as the sign of src0*src1) */
4777 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
4778 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4779 alu.op = ALU_OP3_CNDGE_INT;
4780 alu.is_op3 = 1;
4781
4782 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4783
4784 alu.src[0].sel = tmp2;
4785 alu.src[0].chan = 2;
4786 alu.src[1].sel = tmp0;
4787 alu.src[1].chan = 2;
4788 alu.src[2].sel = tmp0;
4789 alu.src[2].chan = 0;
4790
4791 alu.last = 1;
4792 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4793 return r;
4794 }
4795 }
4796 }
4797 return 0;
4798 }
4799
/* TGSI UDIV: unsigned integer division.
 * Thin wrapper over tgsi_divmod with mod = 0 (quotient), signed_op = 0. */
static int tgsi_udiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 0);
}
4804
/* TGSI UMOD: unsigned integer remainder.
 * Thin wrapper over tgsi_divmod with mod = 1 (remainder), signed_op = 0. */
static int tgsi_umod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 0);
}
4809
/* TGSI IDIV: signed integer division.
 * Thin wrapper over tgsi_divmod with mod = 0 (quotient), signed_op = 1;
 * tgsi_divmod takes absolute values first and fixes the result sign. */
static int tgsi_idiv(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 0, 1);
}
4814
/* TGSI MOD: signed integer remainder.
 * Thin wrapper over tgsi_divmod with mod = 1 (remainder), signed_op = 1;
 * the remainder takes the sign of src0 (see the sign fixup in tgsi_divmod). */
static int tgsi_imod(struct r600_shader_ctx *ctx)
{
	return tgsi_divmod(ctx, 1, 1);
}
4819
4820
4821 static int tgsi_f2i(struct r600_shader_ctx *ctx)
4822 {
4823 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4824 struct r600_bytecode_alu alu;
4825 int i, r;
4826 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4827 int last_inst = tgsi_last_instruction(write_mask);
4828
4829 for (i = 0; i < 4; i++) {
4830 if (!(write_mask & (1<<i)))
4831 continue;
4832
4833 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4834 alu.op = ALU_OP1_TRUNC;
4835
4836 alu.dst.sel = ctx->temp_reg;
4837 alu.dst.chan = i;
4838 alu.dst.write = 1;
4839
4840 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4841 if (i == last_inst)
4842 alu.last = 1;
4843 r = r600_bytecode_add_alu(ctx->bc, &alu);
4844 if (r)
4845 return r;
4846 }
4847
4848 for (i = 0; i < 4; i++) {
4849 if (!(write_mask & (1<<i)))
4850 continue;
4851
4852 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4853 alu.op = ctx->inst_info->op;
4854
4855 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4856
4857 alu.src[0].sel = ctx->temp_reg;
4858 alu.src[0].chan = i;
4859
4860 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
4861 alu.last = 1;
4862 r = r600_bytecode_add_alu(ctx->bc, &alu);
4863 if (r)
4864 return r;
4865 }
4866
4867 return 0;
4868 }
4869
4870 static int tgsi_iabs(struct r600_shader_ctx *ctx)
4871 {
4872 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4873 struct r600_bytecode_alu alu;
4874 int i, r;
4875 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4876 int last_inst = tgsi_last_instruction(write_mask);
4877
4878 /* tmp = -src */
4879 for (i = 0; i < 4; i++) {
4880 if (!(write_mask & (1<<i)))
4881 continue;
4882
4883 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4884 alu.op = ALU_OP2_SUB_INT;
4885
4886 alu.dst.sel = ctx->temp_reg;
4887 alu.dst.chan = i;
4888 alu.dst.write = 1;
4889
4890 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4891 alu.src[0].sel = V_SQ_ALU_SRC_0;
4892
4893 if (i == last_inst)
4894 alu.last = 1;
4895 r = r600_bytecode_add_alu(ctx->bc, &alu);
4896 if (r)
4897 return r;
4898 }
4899
4900 /* dst = (src >= 0 ? src : tmp) */
4901 for (i = 0; i < 4; i++) {
4902 if (!(write_mask & (1<<i)))
4903 continue;
4904
4905 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4906 alu.op = ALU_OP3_CNDGE_INT;
4907 alu.is_op3 = 1;
4908 alu.dst.write = 1;
4909
4910 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4911
4912 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4913 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4914 alu.src[2].sel = ctx->temp_reg;
4915 alu.src[2].chan = i;
4916
4917 if (i == last_inst)
4918 alu.last = 1;
4919 r = r600_bytecode_add_alu(ctx->bc, &alu);
4920 if (r)
4921 return r;
4922 }
4923 return 0;
4924 }
4925
4926 static int tgsi_issg(struct r600_shader_ctx *ctx)
4927 {
4928 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4929 struct r600_bytecode_alu alu;
4930 int i, r;
4931 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4932 int last_inst = tgsi_last_instruction(write_mask);
4933
4934 /* tmp = (src >= 0 ? src : -1) */
4935 for (i = 0; i < 4; i++) {
4936 if (!(write_mask & (1<<i)))
4937 continue;
4938
4939 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4940 alu.op = ALU_OP3_CNDGE_INT;
4941 alu.is_op3 = 1;
4942
4943 alu.dst.sel = ctx->temp_reg;
4944 alu.dst.chan = i;
4945 alu.dst.write = 1;
4946
4947 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4948 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4949 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
4950
4951 if (i == last_inst)
4952 alu.last = 1;
4953 r = r600_bytecode_add_alu(ctx->bc, &alu);
4954 if (r)
4955 return r;
4956 }
4957
4958 /* dst = (tmp > 0 ? 1 : tmp) */
4959 for (i = 0; i < 4; i++) {
4960 if (!(write_mask & (1<<i)))
4961 continue;
4962
4963 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4964 alu.op = ALU_OP3_CNDGT_INT;
4965 alu.is_op3 = 1;
4966 alu.dst.write = 1;
4967
4968 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4969
4970 alu.src[0].sel = ctx->temp_reg;
4971 alu.src[0].chan = i;
4972
4973 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4974
4975 alu.src[2].sel = ctx->temp_reg;
4976 alu.src[2].chan = i;
4977
4978 if (i == last_inst)
4979 alu.last = 1;
4980 r = r600_bytecode_add_alu(ctx->bc, &alu);
4981 if (r)
4982 return r;
4983 }
4984 return 0;
4985 }
4986
4987
4988
4989 static int tgsi_ssg(struct r600_shader_ctx *ctx)
4990 {
4991 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4992 struct r600_bytecode_alu alu;
4993 int i, r;
4994
4995 /* tmp = (src > 0 ? 1 : src) */
4996 for (i = 0; i < 4; i++) {
4997 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4998 alu.op = ALU_OP3_CNDGT;
4999 alu.is_op3 = 1;
5000
5001 alu.dst.sel = ctx->temp_reg;
5002 alu.dst.chan = i;
5003
5004 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5005 alu.src[1].sel = V_SQ_ALU_SRC_1;
5006 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
5007
5008 if (i == 3)
5009 alu.last = 1;
5010 r = r600_bytecode_add_alu(ctx->bc, &alu);
5011 if (r)
5012 return r;
5013 }
5014
5015 /* dst = (-tmp > 0 ? -1 : tmp) */
5016 for (i = 0; i < 4; i++) {
5017 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5018 alu.op = ALU_OP3_CNDGT;
5019 alu.is_op3 = 1;
5020 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5021
5022 alu.src[0].sel = ctx->temp_reg;
5023 alu.src[0].chan = i;
5024 alu.src[0].neg = 1;
5025
5026 alu.src[1].sel = V_SQ_ALU_SRC_1;
5027 alu.src[1].neg = 1;
5028
5029 alu.src[2].sel = ctx->temp_reg;
5030 alu.src[2].chan = i;
5031
5032 if (i == 3)
5033 alu.last = 1;
5034 r = r600_bytecode_add_alu(ctx->bc, &alu);
5035 if (r)
5036 return r;
5037 }
5038 return 0;
5039 }
5040
5041 static int tgsi_bfi(struct r600_shader_ctx *ctx)
5042 {
5043 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5044 struct r600_bytecode_alu alu;
5045 int i, r, t1, t2;
5046
5047 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5048 int last_inst = tgsi_last_instruction(write_mask);
5049
5050 t1 = ctx->temp_reg;
5051
5052 for (i = 0; i < 4; i++) {
5053 if (!(write_mask & (1<<i)))
5054 continue;
5055
5056 /* create mask tmp */
5057 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5058 alu.op = ALU_OP2_BFM_INT;
5059 alu.dst.sel = t1;
5060 alu.dst.chan = i;
5061 alu.dst.write = 1;
5062 alu.last = i == last_inst;
5063
5064 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
5065 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5066
5067 r = r600_bytecode_add_alu(ctx->bc, &alu);
5068 if (r)
5069 return r;
5070 }
5071
5072 t2 = r600_get_temp(ctx);
5073
5074 for (i = 0; i < 4; i++) {
5075 if (!(write_mask & (1<<i)))
5076 continue;
5077
5078 /* shift insert left */
5079 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5080 alu.op = ALU_OP2_LSHL_INT;
5081 alu.dst.sel = t2;
5082 alu.dst.chan = i;
5083 alu.dst.write = 1;
5084 alu.last = i == last_inst;
5085
5086 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5087 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5088
5089 r = r600_bytecode_add_alu(ctx->bc, &alu);
5090 if (r)
5091 return r;
5092 }
5093
5094 for (i = 0; i < 4; i++) {
5095 if (!(write_mask & (1<<i)))
5096 continue;
5097
5098 /* actual bitfield insert */
5099 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5100 alu.op = ALU_OP3_BFI_INT;
5101 alu.is_op3 = 1;
5102 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5103 alu.dst.chan = i;
5104 alu.dst.write = 1;
5105 alu.last = i == last_inst;
5106
5107 alu.src[0].sel = t1;
5108 alu.src[0].chan = i;
5109 alu.src[1].sel = t2;
5110 alu.src[1].chan = i;
5111 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
5112
5113 r = r600_bytecode_add_alu(ctx->bc, &alu);
5114 if (r)
5115 return r;
5116 }
5117
5118 return 0;
5119 }
5120
5121 static int tgsi_msb(struct r600_shader_ctx *ctx)
5122 {
5123 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5124 struct r600_bytecode_alu alu;
5125 int i, r, t1, t2;
5126
5127 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5128 int last_inst = tgsi_last_instruction(write_mask);
5129
5130 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
5131 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
5132
5133 t1 = ctx->temp_reg;
5134
5135 /* bit position is indexed from lsb by TGSI, and from msb by the hardware */
5136 for (i = 0; i < 4; i++) {
5137 if (!(write_mask & (1<<i)))
5138 continue;
5139
5140 /* t1 = FFBH_INT / FFBH_UINT */
5141 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5142 alu.op = ctx->inst_info->op;
5143 alu.dst.sel = t1;
5144 alu.dst.chan = i;
5145 alu.dst.write = 1;
5146 alu.last = i == last_inst;
5147
5148 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5149
5150 r = r600_bytecode_add_alu(ctx->bc, &alu);
5151 if (r)
5152 return r;
5153 }
5154
5155 t2 = r600_get_temp(ctx);
5156
5157 for (i = 0; i < 4; i++) {
5158 if (!(write_mask & (1<<i)))
5159 continue;
5160
5161 /* t2 = 31 - t1 */
5162 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5163 alu.op = ALU_OP2_SUB_INT;
5164 alu.dst.sel = t2;
5165 alu.dst.chan = i;
5166 alu.dst.write = 1;
5167 alu.last = i == last_inst;
5168
5169 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
5170 alu.src[0].value = 31;
5171 alu.src[1].sel = t1;
5172 alu.src[1].chan = i;
5173
5174 r = r600_bytecode_add_alu(ctx->bc, &alu);
5175 if (r)
5176 return r;
5177 }
5178
5179 for (i = 0; i < 4; i++) {
5180 if (!(write_mask & (1<<i)))
5181 continue;
5182
5183 /* result = t1 >= 0 ? t2 : t1 */
5184 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5185 alu.op = ALU_OP3_CNDGE_INT;
5186 alu.is_op3 = 1;
5187 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5188 alu.dst.chan = i;
5189 alu.dst.write = 1;
5190 alu.last = i == last_inst;
5191
5192 alu.src[0].sel = t1;
5193 alu.src[0].chan = i;
5194 alu.src[1].sel = t2;
5195 alu.src[1].chan = i;
5196 alu.src[2].sel = t1;
5197 alu.src[2].chan = i;
5198
5199 r = r600_bytecode_add_alu(ctx->bc, &alu);
5200 if (r)
5201 return r;
5202 }
5203
5204 return 0;
5205 }
5206
/* Emit code for TGSI INTERP_CENTROID / INTERP_OFFSET / INTERP_SAMPLE on
 * Evergreen/Cayman: re-interpolate a fragment-shader input at the
 * requested location using the hardware INTERP_XY/INTERP_ZW ALU ops.
 * For OFFSET/SAMPLE the barycentric (i, j) pair is first displaced by
 * screen-space gradients fetched through the texture unit. */
static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
	unsigned location;
	int input;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	input = inst->Src[0].Register.Index;

	/* Interpolators have been marked for use already by allocate_system_value_inputs */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
		inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
	}
	else {
		location = TGSI_INTERPOLATE_LOC_CENTROID;
	}

	/* Two (i, j) pairs are packed per interpolator GPR: ij_index/2
	 * selects the GPR, ij_index%2 selects which half (channels 0-1
	 * or 2-3) holds this interpolator's barycentrics. */
	k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
	if (k < 0)
		k = 0;
	interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
	interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);

	/* NOTE: currently offset is not perspective correct */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
		inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		int sample_gpr = -1;
		int gradientsH, gradientsV;
		struct r600_bytecode_tex tex;

		if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
			sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
		}

		/* Fetch d(i,j)/dx into gradientsH and d(i,j)/dy into
		 * gradientsV via the texture unit's gradient ops. */
		gradientsH = r600_get_temp(ctx);
		gradientsV = r600_get_temp(ctx);
		for (i = 0; i < 2; i++) {
			memset(&tex, 0, sizeof(struct r600_bytecode_tex));
			tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
			tex.src_gpr = interp_gpr;
			tex.src_sel_x = interp_base_chan + 0;
			tex.src_sel_y = interp_base_chan + 1;
			tex.src_sel_z = 0;
			tex.src_sel_w = 0;
			tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
			tex.dst_sel_x = 0;
			tex.dst_sel_y = 1;
			tex.dst_sel_z = 7;
			tex.dst_sel_w = 7;
			tex.inst_mod = 1; // Use per pixel gradient calculation
			tex.sampler_id = 0;
			tex.resource_id = tex.sampler_id;
			r = r600_bytecode_add_tex(ctx->bc, &tex);
			if (r)
				return r;
		}

		/* temp.xy = gradH * offset.x + ij
		 * (SAMPLE: offset comes from the sample position, chan 2) */
		for (i = 0; i < 2; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP3_MULADD;
			alu.is_op3 = 1;
			alu.src[0].sel = gradientsH;
			alu.src[0].chan = i;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
				alu.src[1].sel = sample_gpr;
				alu.src[1].chan = 2;
			}
			else {
				r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
			}
			alu.src[2].sel = interp_gpr;
			alu.src[2].chan = interp_base_chan + i;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			alu.last = i == 1;

			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}

		/* temp.xy = gradV * offset.y + temp.xy
		 * (SAMPLE: offset comes from the sample position, chan 3) */
		for (i = 0; i < 2; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP3_MULADD;
			alu.is_op3 = 1;
			alu.src[0].sel = gradientsV;
			alu.src[0].chan = i;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
				alu.src[1].sel = sample_gpr;
				alu.src[1].chan = 3;
			}
			else {
				r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
			}
			alu.src[2].sel = ctx->temp_reg;
			alu.src[2].chan = i;
			alu.dst.sel = ctx->temp_reg;
			alu.dst.chan = i;
			alu.last = i == 1;

			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
	}

	/* Interpolate all four components: slots 0-3 are INTERP_ZW, slots
	 * 4-7 INTERP_XY; only slots 2-5 actually write (tmp.zw then tmp.xy).
	 * Sources feed the (i, j) pair in reversed channel order
	 * (chan = 1 - i%2), and src1 selects the attribute's parameter. */
	tmp = r600_get_temp(ctx);
	for (i = 0; i < 8; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;

		alu.dst.sel = tmp;
		if ((i > 1 && i < 6)) {
			alu.dst.write = 1;
		}
		else {
			alu.dst.write = 0;
		}
		alu.dst.chan = i % 4;

		if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
			inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
			alu.src[0].sel = ctx->temp_reg;
			alu.src[0].chan = 1 - (i % 2);
		} else {
			alu.src[0].sel = interp_gpr;
			alu.src[0].chan = interp_base_chan + 1 - (i % 2);
		}
		alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
		alu.src[1].chan = 0;

		alu.last = i % 4 == 3;
		alu.bank_swizzle_force = SQ_ALU_VEC_210;

		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	// INTERP can't swizzle dst
	lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
	for (i = 0; i <= lasti; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		alu.src[0].sel = tmp;
		alu.src[0].chan = ctx->src[0].swizzle[i];
		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.dst.write = 1;
		alu.last = i == lasti;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}

	return 0;
}
5370
5371
5372 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
5373 {
5374 struct r600_bytecode_alu alu;
5375 int i, r;
5376
5377 for (i = 0; i < 4; i++) {
5378 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5379 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
5380 alu.op = ALU_OP0_NOP;
5381 alu.dst.chan = i;
5382 } else {
5383 alu.op = ALU_OP1_MOV;
5384 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5385 alu.src[0].sel = ctx->temp_reg;
5386 alu.src[0].chan = i;
5387 }
5388 if (i == 3) {
5389 alu.last = 1;
5390 }
5391 r = r600_bytecode_add_alu(ctx->bc, &alu);
5392 if (r)
5393 return r;
5394 }
5395 return 0;
5396 }
5397
5398 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
5399 unsigned temp, int chan,
5400 struct r600_bytecode_alu_src *bc_src,
5401 const struct r600_shader_src *shader_src)
5402 {
5403 struct r600_bytecode_alu alu;
5404 int r;
5405
5406 r600_bytecode_src(bc_src, shader_src, chan);
5407
5408 /* op3 operands don't support abs modifier */
5409 if (bc_src->abs) {
5410 assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
5411 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5412 alu.op = ALU_OP1_MOV;
5413 alu.dst.sel = temp;
5414 alu.dst.chan = chan;
5415 alu.dst.write = 1;
5416
5417 alu.src[0] = *bc_src;
5418 alu.last = true; // sufficient?
5419 r = r600_bytecode_add_alu(ctx->bc, &alu);
5420 if (r)
5421 return r;
5422
5423 memset(bc_src, 0, sizeof(*bc_src));
5424 bc_src->sel = temp;
5425 bc_src->chan = chan;
5426 }
5427 return 0;
5428 }
5429
/* Translate a generic three-source TGSI instruction (MULADD, CND*, ...)
 * into per-channel OP3 ALU instructions.
 *
 * OP3 encodings cannot express the abs source modifier, so any source
 * that carries abs is given a scratch register up front and
 * tgsi_make_src_for_op3() copies the value there before use.
 */
static int tgsi_op3(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, r;
	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
	int temp_regs[4];

	/* Reserve one scratch register per abs-modified source; 0 means
	 * "no scratch needed" for that source. */
	for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
		temp_regs[j] = 0;
		if (ctx->src[j].abs)
			temp_regs[j] = r600_get_temp(ctx);
	}
	/* Emit one OP3 instruction per written channel. */
	for (i = 0; i < lasti + 1; i++) {
		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
			continue;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
			if (r)
				return r;
		}

		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.dst.chan = i;
		alu.dst.write = 1;
		alu.is_op3 = 1;
		/* Close the ALU group on the last written channel. */
		if (i == lasti) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
5468
/* Translate the TGSI dot-product family (DP2/DP3/DP4/DPH) to the
 * hardware dot instruction.
 *
 * The hardware op always multiply-accumulates all four channels, so the
 * shorter variants are built by substituting neutral operands: constant
 * 0 for channels beyond the product's width (DP2/DP3), and constant 1
 * for the first operand's W term in DPH.
 */
static int tgsi_dp(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, r;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ctx->inst_info->op;
		for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
			r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
		}

		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
		alu.dst.chan = i;
		alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
		/* handle some special cases */
		switch (inst->Instruction.Opcode) {
		case TGSI_OPCODE_DP2:
			/* zero the z and w terms: result = x*x' + y*y' */
			if (i > 1) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DP3:
			/* zero the w term: result = x*x' + y*y' + z*z' */
			if (i > 2) {
				alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
				alu.src[0].chan = alu.src[1].chan = 0;
			}
			break;
		case TGSI_OPCODE_DPH:
			/* w term becomes 1.0 * src1.w */
			if (i == 3) {
				alu.src[0].sel = V_SQ_ALU_SRC_1;
				alu.src[0].chan = 0;
				alu.src[0].neg = 0;
			}
			break;
		default:
			break;
		}
		if (i == 3) {
			alu.last = 1;
		}
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}
5518
5519 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
5520 unsigned index)
5521 {
5522 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5523 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
5524 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
5525 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
5526 ctx->src[index].neg || ctx->src[index].abs ||
5527 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == TGSI_PROCESSOR_GEOMETRY);
5528 }
5529
5530 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
5531 unsigned index)
5532 {
5533 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5534 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
5535 }
5536
5537 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
5538 {
5539 struct r600_bytecode_vtx vtx;
5540 struct r600_bytecode_alu alu;
5541 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5542 int src_gpr, r, i;
5543 int id = tgsi_tex_get_src_gpr(ctx, 1);
5544
5545 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5546 if (src_requires_loading) {
5547 for (i = 0; i < 4; i++) {
5548 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5549 alu.op = ALU_OP1_MOV;
5550 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5551 alu.dst.sel = ctx->temp_reg;
5552 alu.dst.chan = i;
5553 if (i == 3)
5554 alu.last = 1;
5555 alu.dst.write = 1;
5556 r = r600_bytecode_add_alu(ctx->bc, &alu);
5557 if (r)
5558 return r;
5559 }
5560 src_gpr = ctx->temp_reg;
5561 }
5562
5563 memset(&vtx, 0, sizeof(vtx));
5564 vtx.op = FETCH_OP_VFETCH;
5565 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
5566 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
5567 vtx.src_gpr = src_gpr;
5568 vtx.mega_fetch_count = 16;
5569 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
5570 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
5571 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
5572 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
5573 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
5574 vtx.use_const_fields = 1;
5575
5576 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
5577 return r;
5578
5579 if (ctx->bc->chip_class >= EVERGREEN)
5580 return 0;
5581
5582 for (i = 0; i < 4; i++) {
5583 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5584 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5585 continue;
5586
5587 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5588 alu.op = ALU_OP2_AND_INT;
5589
5590 alu.dst.chan = i;
5591 alu.dst.sel = vtx.dst_gpr;
5592 alu.dst.write = 1;
5593
5594 alu.src[0].sel = vtx.dst_gpr;
5595 alu.src[0].chan = i;
5596
5597 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
5598 alu.src[1].sel += (id * 2);
5599 alu.src[1].chan = i % 4;
5600 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5601
5602 if (i == lasti)
5603 alu.last = 1;
5604 r = r600_bytecode_add_alu(ctx->bc, &alu);
5605 if (r)
5606 return r;
5607 }
5608
5609 if (inst->Dst[0].Register.WriteMask & 3) {
5610 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5611 alu.op = ALU_OP2_OR_INT;
5612
5613 alu.dst.chan = 3;
5614 alu.dst.sel = vtx.dst_gpr;
5615 alu.dst.write = 1;
5616
5617 alu.src[0].sel = vtx.dst_gpr;
5618 alu.src[0].chan = 3;
5619
5620 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
5621 alu.src[1].chan = 0;
5622 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5623
5624 alu.last = 1;
5625 r = r600_bytecode_add_alu(ctx->bc, &alu);
5626 if (r)
5627 return r;
5628 }
5629 return 0;
5630 }
5631
5632 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx)
5633 {
5634 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5635 struct r600_bytecode_alu alu;
5636 int r;
5637 int id = tgsi_tex_get_src_gpr(ctx, 1);
5638
5639 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5640 alu.op = ALU_OP1_MOV;
5641 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
5642 if (ctx->bc->chip_class >= EVERGREEN) {
5643 /* channel 0 or 2 of each word */
5644 alu.src[0].sel += (id / 2);
5645 alu.src[0].chan = (id % 2) * 2;
5646 } else {
5647 /* r600 we have them at channel 2 of the second dword */
5648 alu.src[0].sel += (id * 2) + 1;
5649 alu.src[0].chan = 1;
5650 }
5651 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5652 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5653 alu.last = 1;
5654 r = r600_bytecode_add_alu(ctx->bc, &alu);
5655 if (r)
5656 return r;
5657 return 0;
5658 }
5659
5660 static int tgsi_tex(struct r600_shader_ctx *ctx)
5661 {
5662 static float one_point_five = 1.5f;
5663 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5664 struct r600_bytecode_tex tex;
5665 struct r600_bytecode_alu alu;
5666 unsigned src_gpr;
5667 int r, i, j;
5668 int opcode;
5669 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
5670 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5671 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
5672 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
5673
5674 bool txf_add_offsets = inst->Texture.NumOffsets &&
5675 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5676 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
5677
5678 /* Texture fetch instructions can only use gprs as source.
5679 * Also they cannot negate the source or take the absolute value */
5680 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
5681 inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
5682 tgsi_tex_src_requires_loading(ctx, 0)) ||
5683 read_compressed_msaa || txf_add_offsets;
5684
5685 boolean src_loaded = FALSE;
5686 unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
5687 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
5688 boolean has_txq_cube_array_z = false;
5689 unsigned sampler_index_mode;
5690
5691 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
5692 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5693 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
5694 if (inst->Dst[0].Register.WriteMask & 4) {
5695 ctx->shader->has_txq_cube_array_z_comp = true;
5696 has_txq_cube_array_z = true;
5697 }
5698
5699 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
5700 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5701 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
5702 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
5703 sampler_src_reg = 2;
5704
5705 /* TGSI moves the sampler to src reg 3 for TXD */
5706 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
5707 sampler_src_reg = 3;
5708
5709 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
5710
5711 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5712
5713 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
5714 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
5715 ctx->shader->uses_tex_buffers = true;
5716 return r600_do_buffer_txq(ctx);
5717 }
5718 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
5719 if (ctx->bc->chip_class < EVERGREEN)
5720 ctx->shader->uses_tex_buffers = true;
5721 return do_vtx_fetch_inst(ctx, src_requires_loading);
5722 }
5723 }
5724
5725 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
5726 int out_chan;
5727 /* Add perspective divide */
5728 if (ctx->bc->chip_class == CAYMAN) {
5729 out_chan = 2;
5730 for (i = 0; i < 3; i++) {
5731 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5732 alu.op = ALU_OP1_RECIP_IEEE;
5733 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5734
5735 alu.dst.sel = ctx->temp_reg;
5736 alu.dst.chan = i;
5737 if (i == 2)
5738 alu.last = 1;
5739 if (out_chan == i)
5740 alu.dst.write = 1;
5741 r = r600_bytecode_add_alu(ctx->bc, &alu);
5742 if (r)
5743 return r;
5744 }
5745
5746 } else {
5747 out_chan = 3;
5748 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5749 alu.op = ALU_OP1_RECIP_IEEE;
5750 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5751
5752 alu.dst.sel = ctx->temp_reg;
5753 alu.dst.chan = out_chan;
5754 alu.last = 1;
5755 alu.dst.write = 1;
5756 r = r600_bytecode_add_alu(ctx->bc, &alu);
5757 if (r)
5758 return r;
5759 }
5760
5761 for (i = 0; i < 3; i++) {
5762 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5763 alu.op = ALU_OP2_MUL;
5764 alu.src[0].sel = ctx->temp_reg;
5765 alu.src[0].chan = out_chan;
5766 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5767 alu.dst.sel = ctx->temp_reg;
5768 alu.dst.chan = i;
5769 alu.dst.write = 1;
5770 r = r600_bytecode_add_alu(ctx->bc, &alu);
5771 if (r)
5772 return r;
5773 }
5774 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5775 alu.op = ALU_OP1_MOV;
5776 alu.src[0].sel = V_SQ_ALU_SRC_1;
5777 alu.src[0].chan = 0;
5778 alu.dst.sel = ctx->temp_reg;
5779 alu.dst.chan = 3;
5780 alu.last = 1;
5781 alu.dst.write = 1;
5782 r = r600_bytecode_add_alu(ctx->bc, &alu);
5783 if (r)
5784 return r;
5785 src_loaded = TRUE;
5786 src_gpr = ctx->temp_reg;
5787 }
5788
5789
5790 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
5791 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5792 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5793 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
5794 inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
5795 inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
5796
5797 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
5798 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
5799
5800 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
5801 for (i = 0; i < 4; i++) {
5802 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5803 alu.op = ALU_OP2_CUBE;
5804 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
5805 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
5806 alu.dst.sel = ctx->temp_reg;
5807 alu.dst.chan = i;
5808 if (i == 3)
5809 alu.last = 1;
5810 alu.dst.write = 1;
5811 r = r600_bytecode_add_alu(ctx->bc, &alu);
5812 if (r)
5813 return r;
5814 }
5815
5816 /* tmp1.z = RCP_e(|tmp1.z|) */
5817 if (ctx->bc->chip_class == CAYMAN) {
5818 for (i = 0; i < 3; i++) {
5819 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5820 alu.op = ALU_OP1_RECIP_IEEE;
5821 alu.src[0].sel = ctx->temp_reg;
5822 alu.src[0].chan = 2;
5823 alu.src[0].abs = 1;
5824 alu.dst.sel = ctx->temp_reg;
5825 alu.dst.chan = i;
5826 if (i == 2)
5827 alu.dst.write = 1;
5828 if (i == 2)
5829 alu.last = 1;
5830 r = r600_bytecode_add_alu(ctx->bc, &alu);
5831 if (r)
5832 return r;
5833 }
5834 } else {
5835 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5836 alu.op = ALU_OP1_RECIP_IEEE;
5837 alu.src[0].sel = ctx->temp_reg;
5838 alu.src[0].chan = 2;
5839 alu.src[0].abs = 1;
5840 alu.dst.sel = ctx->temp_reg;
5841 alu.dst.chan = 2;
5842 alu.dst.write = 1;
5843 alu.last = 1;
5844 r = r600_bytecode_add_alu(ctx->bc, &alu);
5845 if (r)
5846 return r;
5847 }
5848
5849 /* MULADD R0.x, R0.x, PS1, (0x3FC00000, 1.5f).x
5850 * MULADD R0.y, R0.y, PS1, (0x3FC00000, 1.5f).x
5851 * muladd has no writemask, have to use another temp
5852 */
5853 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5854 alu.op = ALU_OP3_MULADD;
5855 alu.is_op3 = 1;
5856
5857 alu.src[0].sel = ctx->temp_reg;
5858 alu.src[0].chan = 0;
5859 alu.src[1].sel = ctx->temp_reg;
5860 alu.src[1].chan = 2;
5861
5862 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5863 alu.src[2].chan = 0;
5864 alu.src[2].value = *(uint32_t *)&one_point_five;
5865
5866 alu.dst.sel = ctx->temp_reg;
5867 alu.dst.chan = 0;
5868 alu.dst.write = 1;
5869
5870 r = r600_bytecode_add_alu(ctx->bc, &alu);
5871 if (r)
5872 return r;
5873
5874 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5875 alu.op = ALU_OP3_MULADD;
5876 alu.is_op3 = 1;
5877
5878 alu.src[0].sel = ctx->temp_reg;
5879 alu.src[0].chan = 1;
5880 alu.src[1].sel = ctx->temp_reg;
5881 alu.src[1].chan = 2;
5882
5883 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5884 alu.src[2].chan = 0;
5885 alu.src[2].value = *(uint32_t *)&one_point_five;
5886
5887 alu.dst.sel = ctx->temp_reg;
5888 alu.dst.chan = 1;
5889 alu.dst.write = 1;
5890
5891 alu.last = 1;
5892 r = r600_bytecode_add_alu(ctx->bc, &alu);
5893 if (r)
5894 return r;
5895 /* write initial compare value into Z component
5896 - W src 0 for shadow cube
5897 - X src 1 for shadow cube array */
5898 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5899 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5900 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5901 alu.op = ALU_OP1_MOV;
5902 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
5903 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5904 else
5905 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5906 alu.dst.sel = ctx->temp_reg;
5907 alu.dst.chan = 2;
5908 alu.dst.write = 1;
5909 alu.last = 1;
5910 r = r600_bytecode_add_alu(ctx->bc, &alu);
5911 if (r)
5912 return r;
5913 }
5914
5915 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5916 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5917 if (ctx->bc->chip_class >= EVERGREEN) {
5918 int mytmp = r600_get_temp(ctx);
5919 static const float eight = 8.0f;
5920 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5921 alu.op = ALU_OP1_MOV;
5922 alu.src[0].sel = ctx->temp_reg;
5923 alu.src[0].chan = 3;
5924 alu.dst.sel = mytmp;
5925 alu.dst.chan = 0;
5926 alu.dst.write = 1;
5927 alu.last = 1;
5928 r = r600_bytecode_add_alu(ctx->bc, &alu);
5929 if (r)
5930 return r;
5931
5932 /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
5933 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5934 alu.op = ALU_OP3_MULADD;
5935 alu.is_op3 = 1;
5936 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5937 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5938 alu.src[1].chan = 0;
5939 alu.src[1].value = *(uint32_t *)&eight;
5940 alu.src[2].sel = mytmp;
5941 alu.src[2].chan = 0;
5942 alu.dst.sel = ctx->temp_reg;
5943 alu.dst.chan = 3;
5944 alu.dst.write = 1;
5945 alu.last = 1;
5946 r = r600_bytecode_add_alu(ctx->bc, &alu);
5947 if (r)
5948 return r;
5949 } else if (ctx->bc->chip_class < EVERGREEN) {
5950 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
5951 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
5952 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
5953 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
5954 tex.src_gpr = r600_get_temp(ctx);
5955 tex.src_sel_x = 0;
5956 tex.src_sel_y = 0;
5957 tex.src_sel_z = 0;
5958 tex.src_sel_w = 0;
5959 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
5960 tex.coord_type_x = 1;
5961 tex.coord_type_y = 1;
5962 tex.coord_type_z = 1;
5963 tex.coord_type_w = 1;
5964 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5965 alu.op = ALU_OP1_MOV;
5966 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5967 alu.dst.sel = tex.src_gpr;
5968 alu.dst.chan = 0;
5969 alu.last = 1;
5970 alu.dst.write = 1;
5971 r = r600_bytecode_add_alu(ctx->bc, &alu);
5972 if (r)
5973 return r;
5974
5975 r = r600_bytecode_add_tex(ctx->bc, &tex);
5976 if (r)
5977 return r;
5978 }
5979
5980 }
5981
5982 /* for cube forms of lod and bias we need to route things */
5983 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5984 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5985 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5986 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
5987 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5988 alu.op = ALU_OP1_MOV;
5989 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5990 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
5991 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5992 else
5993 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5994 alu.dst.sel = ctx->temp_reg;
5995 alu.dst.chan = 2;
5996 alu.last = 1;
5997 alu.dst.write = 1;
5998 r = r600_bytecode_add_alu(ctx->bc, &alu);
5999 if (r)
6000 return r;
6001 }
6002
6003 src_loaded = TRUE;
6004 src_gpr = ctx->temp_reg;
6005 }
6006
6007 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
6008 int temp_h = 0, temp_v = 0;
6009 int start_val = 0;
6010
6011 /* if we've already loaded the src (i.e. CUBE don't reload it). */
6012 if (src_loaded == TRUE)
6013 start_val = 1;
6014 else
6015 src_loaded = TRUE;
6016 for (i = start_val; i < 3; i++) {
6017 int treg = r600_get_temp(ctx);
6018
6019 if (i == 0)
6020 src_gpr = treg;
6021 else if (i == 1)
6022 temp_h = treg;
6023 else
6024 temp_v = treg;
6025
6026 for (j = 0; j < 4; j++) {
6027 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6028 alu.op = ALU_OP1_MOV;
6029 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
6030 alu.dst.sel = treg;
6031 alu.dst.chan = j;
6032 if (j == 3)
6033 alu.last = 1;
6034 alu.dst.write = 1;
6035 r = r600_bytecode_add_alu(ctx->bc, &alu);
6036 if (r)
6037 return r;
6038 }
6039 }
6040 for (i = 1; i < 3; i++) {
6041 /* set gradients h/v */
6042 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6043 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
6044 FETCH_OP_SET_GRADIENTS_V;
6045 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6046 tex.sampler_index_mode = sampler_index_mode;
6047 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6048 tex.resource_index_mode = sampler_index_mode;
6049
6050 tex.src_gpr = (i == 1) ? temp_h : temp_v;
6051 tex.src_sel_x = 0;
6052 tex.src_sel_y = 1;
6053 tex.src_sel_z = 2;
6054 tex.src_sel_w = 3;
6055
6056 tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
6057 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
6058 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
6059 tex.coord_type_x = 1;
6060 tex.coord_type_y = 1;
6061 tex.coord_type_z = 1;
6062 tex.coord_type_w = 1;
6063 }
6064 r = r600_bytecode_add_tex(ctx->bc, &tex);
6065 if (r)
6066 return r;
6067 }
6068 }
6069
6070 if (src_requires_loading && !src_loaded) {
6071 for (i = 0; i < 4; i++) {
6072 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6073 alu.op = ALU_OP1_MOV;
6074 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6075 alu.dst.sel = ctx->temp_reg;
6076 alu.dst.chan = i;
6077 if (i == 3)
6078 alu.last = 1;
6079 alu.dst.write = 1;
6080 r = r600_bytecode_add_alu(ctx->bc, &alu);
6081 if (r)
6082 return r;
6083 }
6084 src_loaded = TRUE;
6085 src_gpr = ctx->temp_reg;
6086 }
6087
6088 /* get offset values */
6089 if (inst->Texture.NumOffsets) {
6090 assert(inst->Texture.NumOffsets == 1);
6091
6092 /* The texture offset feature doesn't work with the TXF instruction
6093 * and must be emulated by adding the offset to the texture coordinates. */
6094 if (txf_add_offsets) {
6095 const struct tgsi_texture_offset *off = inst->TexOffsets;
6096
6097 switch (inst->Texture.Texture) {
6098 case TGSI_TEXTURE_3D:
6099 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6100 alu.op = ALU_OP2_ADD_INT;
6101 alu.src[0].sel = src_gpr;
6102 alu.src[0].chan = 2;
6103 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6104 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
6105 alu.dst.sel = src_gpr;
6106 alu.dst.chan = 2;
6107 alu.dst.write = 1;
6108 alu.last = 1;
6109 r = r600_bytecode_add_alu(ctx->bc, &alu);
6110 if (r)
6111 return r;
6112 /* fall through */
6113
6114 case TGSI_TEXTURE_2D:
6115 case TGSI_TEXTURE_SHADOW2D:
6116 case TGSI_TEXTURE_RECT:
6117 case TGSI_TEXTURE_SHADOWRECT:
6118 case TGSI_TEXTURE_2D_ARRAY:
6119 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6120 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6121 alu.op = ALU_OP2_ADD_INT;
6122 alu.src[0].sel = src_gpr;
6123 alu.src[0].chan = 1;
6124 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6125 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
6126 alu.dst.sel = src_gpr;
6127 alu.dst.chan = 1;
6128 alu.dst.write = 1;
6129 alu.last = 1;
6130 r = r600_bytecode_add_alu(ctx->bc, &alu);
6131 if (r)
6132 return r;
6133 /* fall through */
6134
6135 case TGSI_TEXTURE_1D:
6136 case TGSI_TEXTURE_SHADOW1D:
6137 case TGSI_TEXTURE_1D_ARRAY:
6138 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6139 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6140 alu.op = ALU_OP2_ADD_INT;
6141 alu.src[0].sel = src_gpr;
6142 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6143 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
6144 alu.dst.sel = src_gpr;
6145 alu.dst.write = 1;
6146 alu.last = 1;
6147 r = r600_bytecode_add_alu(ctx->bc, &alu);
6148 if (r)
6149 return r;
6150 break;
6151 /* texture offsets do not apply to other texture targets */
6152 }
6153 } else {
6154 switch (inst->Texture.Texture) {
6155 case TGSI_TEXTURE_3D:
6156 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
6157 /* fallthrough */
6158 case TGSI_TEXTURE_2D:
6159 case TGSI_TEXTURE_SHADOW2D:
6160 case TGSI_TEXTURE_RECT:
6161 case TGSI_TEXTURE_SHADOWRECT:
6162 case TGSI_TEXTURE_2D_ARRAY:
6163 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6164 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
6165 /* fallthrough */
6166 case TGSI_TEXTURE_1D:
6167 case TGSI_TEXTURE_SHADOW1D:
6168 case TGSI_TEXTURE_1D_ARRAY:
6169 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6170 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
6171 }
6172 }
6173 }
6174
6175 /* Obtain the sample index for reading a compressed MSAA color texture.
6176 * To read the FMASK, we use the ldfptr instruction, which tells us
6177 * where the samples are stored.
6178 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
6179 * which is the identity mapping. Each nibble says which physical sample
6180 * should be fetched to get that sample.
6181 *
6182 * Assume src.z contains the sample index. It should be modified like this:
6183 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
6184 * Then fetch the texel with src.
6185 */
6186 if (read_compressed_msaa) {
6187 unsigned sample_chan = 3;
6188 unsigned temp = r600_get_temp(ctx);
6189 assert(src_loaded);
6190
6191 /* temp.w = ldfptr() */
6192 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6193 tex.op = FETCH_OP_LD;
6194 tex.inst_mod = 1; /* to indicate this is ldfptr */
6195 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6196 tex.sampler_index_mode = sampler_index_mode;
6197 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6198 tex.resource_index_mode = sampler_index_mode;
6199 tex.src_gpr = src_gpr;
6200 tex.dst_gpr = temp;
6201 tex.dst_sel_x = 7; /* mask out these components */
6202 tex.dst_sel_y = 7;
6203 tex.dst_sel_z = 7;
6204 tex.dst_sel_w = 0; /* store X */
6205 tex.src_sel_x = 0;
6206 tex.src_sel_y = 1;
6207 tex.src_sel_z = 2;
6208 tex.src_sel_w = 3;
6209 tex.offset_x = offset_x;
6210 tex.offset_y = offset_y;
6211 tex.offset_z = offset_z;
6212 r = r600_bytecode_add_tex(ctx->bc, &tex);
6213 if (r)
6214 return r;
6215
6216 /* temp.x = sample_index*4 */
6217 if (ctx->bc->chip_class == CAYMAN) {
6218 for (i = 0 ; i < 4; i++) {
6219 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6220 alu.op = ALU_OP2_MULLO_INT;
6221 alu.src[0].sel = src_gpr;
6222 alu.src[0].chan = sample_chan;
6223 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6224 alu.src[1].value = 4;
6225 alu.dst.sel = temp;
6226 alu.dst.chan = i;
6227 alu.dst.write = i == 0;
6228 if (i == 3)
6229 alu.last = 1;
6230 r = r600_bytecode_add_alu(ctx->bc, &alu);
6231 if (r)
6232 return r;
6233 }
6234 } else {
6235 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6236 alu.op = ALU_OP2_MULLO_INT;
6237 alu.src[0].sel = src_gpr;
6238 alu.src[0].chan = sample_chan;
6239 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6240 alu.src[1].value = 4;
6241 alu.dst.sel = temp;
6242 alu.dst.chan = 0;
6243 alu.dst.write = 1;
6244 alu.last = 1;
6245 r = r600_bytecode_add_alu(ctx->bc, &alu);
6246 if (r)
6247 return r;
6248 }
6249
6250 /* sample_index = temp.w >> temp.x */
6251 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6252 alu.op = ALU_OP2_LSHR_INT;
6253 alu.src[0].sel = temp;
6254 alu.src[0].chan = 3;
6255 alu.src[1].sel = temp;
6256 alu.src[1].chan = 0;
6257 alu.dst.sel = src_gpr;
6258 alu.dst.chan = sample_chan;
6259 alu.dst.write = 1;
6260 alu.last = 1;
6261 r = r600_bytecode_add_alu(ctx->bc, &alu);
6262 if (r)
6263 return r;
6264
6265 /* sample_index & 0xF */
6266 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6267 alu.op = ALU_OP2_AND_INT;
6268 alu.src[0].sel = src_gpr;
6269 alu.src[0].chan = sample_chan;
6270 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6271 alu.src[1].value = 0xF;
6272 alu.dst.sel = src_gpr;
6273 alu.dst.chan = sample_chan;
6274 alu.dst.write = 1;
6275 alu.last = 1;
6276 r = r600_bytecode_add_alu(ctx->bc, &alu);
6277 if (r)
6278 return r;
6279 #if 0
6280 /* visualize the FMASK */
6281 for (i = 0; i < 4; i++) {
6282 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6283 alu.op = ALU_OP1_INT_TO_FLT;
6284 alu.src[0].sel = src_gpr;
6285 alu.src[0].chan = sample_chan;
6286 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6287 alu.dst.chan = i;
6288 alu.dst.write = 1;
6289 alu.last = 1;
6290 r = r600_bytecode_add_alu(ctx->bc, &alu);
6291 if (r)
6292 return r;
6293 }
6294 return 0;
6295 #endif
6296 }
6297
6298 /* does this shader want a num layers from TXQ for a cube array? */
6299 if (has_txq_cube_array_z) {
6300 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6301
6302 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6303 alu.op = ALU_OP1_MOV;
6304
6305 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
6306 if (ctx->bc->chip_class >= EVERGREEN) {
6307 /* channel 1 or 3 of each word */
6308 alu.src[0].sel += (id / 2);
6309 alu.src[0].chan = ((id % 2) * 2) + 1;
6310 } else {
6311 /* r600 we have them at channel 2 of the second dword */
6312 alu.src[0].sel += (id * 2) + 1;
6313 alu.src[0].chan = 2;
6314 }
6315 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6316 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
6317 alu.last = 1;
6318 r = r600_bytecode_add_alu(ctx->bc, &alu);
6319 if (r)
6320 return r;
6321 /* disable writemask from texture instruction */
6322 inst->Dst[0].Register.WriteMask &= ~4;
6323 }
6324
6325 opcode = ctx->inst_info->op;
6326 if (opcode == FETCH_OP_GATHER4 &&
6327 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
6328 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
6329 opcode = FETCH_OP_GATHER4_O;
6330
6331 /* GATHER4_O/GATHER4_C_O use offset values loaded by
6332 SET_TEXTURE_OFFSETS instruction. The immediate offset values
6333 encoded in the instruction are ignored. */
6334 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6335 tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
6336 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6337 tex.sampler_index_mode = sampler_index_mode;
6338 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6339 tex.resource_index_mode = sampler_index_mode;
6340
6341 tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
6342 tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
6343 tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
6344 tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
6345 tex.src_sel_w = 4;
6346
6347 tex.dst_sel_x = 7;
6348 tex.dst_sel_y = 7;
6349 tex.dst_sel_z = 7;
6350 tex.dst_sel_w = 7;
6351
6352 r = r600_bytecode_add_tex(ctx->bc, &tex);
6353 if (r)
6354 return r;
6355 }
6356
6357 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6358 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6359 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6360 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6361 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
6362 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6363 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6364 switch (opcode) {
6365 case FETCH_OP_SAMPLE:
6366 opcode = FETCH_OP_SAMPLE_C;
6367 break;
6368 case FETCH_OP_SAMPLE_L:
6369 opcode = FETCH_OP_SAMPLE_C_L;
6370 break;
6371 case FETCH_OP_SAMPLE_LB:
6372 opcode = FETCH_OP_SAMPLE_C_LB;
6373 break;
6374 case FETCH_OP_SAMPLE_G:
6375 opcode = FETCH_OP_SAMPLE_C_G;
6376 break;
6377 /* Texture gather variants */
6378 case FETCH_OP_GATHER4:
6379 opcode = FETCH_OP_GATHER4_C;
6380 break;
6381 case FETCH_OP_GATHER4_O:
6382 opcode = FETCH_OP_GATHER4_C_O;
6383 break;
6384 }
6385 }
6386
6387 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6388 tex.op = opcode;
6389
6390 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6391 tex.sampler_index_mode = sampler_index_mode;
6392 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6393 tex.resource_index_mode = sampler_index_mode;
6394 tex.src_gpr = src_gpr;
6395 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6396
6397 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
6398 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
6399 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
6400 }
6401
6402 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
6403 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
6404 tex.inst_mod = texture_component_select;
6405
6406 if (ctx->bc->chip_class == CAYMAN) {
6407 /* GATHER4 result order is different from TGSI TG4 */
6408 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
6409 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
6410 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
6411 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6412 } else {
6413 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6414 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6415 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6416 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6417 }
6418 }
6419 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
6420 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6421 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6422 tex.dst_sel_z = 7;
6423 tex.dst_sel_w = 7;
6424 }
6425 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
6426 tex.dst_sel_x = 3;
6427 tex.dst_sel_y = 7;
6428 tex.dst_sel_z = 7;
6429 tex.dst_sel_w = 7;
6430 }
6431 else {
6432 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6433 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6434 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6435 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6436 }
6437
6438
6439 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ||
6440 inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
6441 tex.src_sel_x = 4;
6442 tex.src_sel_y = 4;
6443 tex.src_sel_z = 4;
6444 tex.src_sel_w = 4;
6445 } else if (src_loaded) {
6446 tex.src_sel_x = 0;
6447 tex.src_sel_y = 1;
6448 tex.src_sel_z = 2;
6449 tex.src_sel_w = 3;
6450 } else {
6451 tex.src_sel_x = ctx->src[0].swizzle[0];
6452 tex.src_sel_y = ctx->src[0].swizzle[1];
6453 tex.src_sel_z = ctx->src[0].swizzle[2];
6454 tex.src_sel_w = ctx->src[0].swizzle[3];
6455 tex.src_rel = ctx->src[0].rel;
6456 }
6457
6458 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
6459 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6460 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6461 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6462 tex.src_sel_x = 1;
6463 tex.src_sel_y = 0;
6464 tex.src_sel_z = 3;
6465 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
6466 }
6467
6468 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
6469 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
6470 tex.coord_type_x = 1;
6471 tex.coord_type_y = 1;
6472 }
6473 tex.coord_type_z = 1;
6474 tex.coord_type_w = 1;
6475
6476 tex.offset_x = offset_x;
6477 tex.offset_y = offset_y;
6478 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
6479 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6480 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
6481 tex.offset_z = 0;
6482 }
6483 else {
6484 tex.offset_z = offset_z;
6485 }
6486
6487 /* Put the depth for comparison in W.
6488 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
6489 * Some instructions expect the depth in Z. */
6490 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6491 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6492 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6493 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
6494 opcode != FETCH_OP_SAMPLE_C_L &&
6495 opcode != FETCH_OP_SAMPLE_C_LB) {
6496 tex.src_sel_w = tex.src_sel_z;
6497 }
6498
6499 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
6500 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
6501 if (opcode == FETCH_OP_SAMPLE_C_L ||
6502 opcode == FETCH_OP_SAMPLE_C_LB) {
6503 /* the array index is read from Y */
6504 tex.coord_type_y = 0;
6505 } else {
6506 /* the array index is read from Z */
6507 tex.coord_type_z = 0;
6508 tex.src_sel_z = tex.src_sel_y;
6509 }
6510 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6511 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6512 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6513 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
6514 (ctx->bc->chip_class >= EVERGREEN)))
6515 /* the array index is read from Z */
6516 tex.coord_type_z = 0;
6517
6518 /* mask unused source components */
6519 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
6520 switch (inst->Texture.Texture) {
6521 case TGSI_TEXTURE_2D:
6522 case TGSI_TEXTURE_RECT:
6523 tex.src_sel_z = 7;
6524 tex.src_sel_w = 7;
6525 break;
6526 case TGSI_TEXTURE_1D_ARRAY:
6527 tex.src_sel_y = 7;
6528 tex.src_sel_w = 7;
6529 break;
6530 case TGSI_TEXTURE_1D:
6531 tex.src_sel_y = 7;
6532 tex.src_sel_z = 7;
6533 tex.src_sel_w = 7;
6534 break;
6535 }
6536 }
6537
6538 r = r600_bytecode_add_tex(ctx->bc, &tex);
6539 if (r)
6540 return r;
6541
6542 /* add shadow ambient support - gallium doesn't do it yet */
6543 return 0;
6544 }
6545
6546 static int tgsi_lrp(struct r600_shader_ctx *ctx)
6547 {
6548 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6549 struct r600_bytecode_alu alu;
6550 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6551 unsigned i, temp_regs[2];
6552 int r;
6553
6554 /* optimize if it's just an equal balance */
6555 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
6556 for (i = 0; i < lasti + 1; i++) {
6557 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6558 continue;
6559
6560 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6561 alu.op = ALU_OP2_ADD;
6562 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6563 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6564 alu.omod = 3;
6565 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6566 alu.dst.chan = i;
6567 if (i == lasti) {
6568 alu.last = 1;
6569 }
6570 r = r600_bytecode_add_alu(ctx->bc, &alu);
6571 if (r)
6572 return r;
6573 }
6574 return 0;
6575 }
6576
6577 /* 1 - src0 */
6578 for (i = 0; i < lasti + 1; i++) {
6579 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6580 continue;
6581
6582 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6583 alu.op = ALU_OP2_ADD;
6584 alu.src[0].sel = V_SQ_ALU_SRC_1;
6585 alu.src[0].chan = 0;
6586 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6587 r600_bytecode_src_toggle_neg(&alu.src[1]);
6588 alu.dst.sel = ctx->temp_reg;
6589 alu.dst.chan = i;
6590 if (i == lasti) {
6591 alu.last = 1;
6592 }
6593 alu.dst.write = 1;
6594 r = r600_bytecode_add_alu(ctx->bc, &alu);
6595 if (r)
6596 return r;
6597 }
6598
6599 /* (1 - src0) * src2 */
6600 for (i = 0; i < lasti + 1; i++) {
6601 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6602 continue;
6603
6604 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6605 alu.op = ALU_OP2_MUL;
6606 alu.src[0].sel = ctx->temp_reg;
6607 alu.src[0].chan = i;
6608 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6609 alu.dst.sel = ctx->temp_reg;
6610 alu.dst.chan = i;
6611 if (i == lasti) {
6612 alu.last = 1;
6613 }
6614 alu.dst.write = 1;
6615 r = r600_bytecode_add_alu(ctx->bc, &alu);
6616 if (r)
6617 return r;
6618 }
6619
6620 /* src0 * src1 + (1 - src0) * src2 */
6621 if (ctx->src[0].abs)
6622 temp_regs[0] = r600_get_temp(ctx);
6623 else
6624 temp_regs[0] = 0;
6625 if (ctx->src[1].abs)
6626 temp_regs[1] = r600_get_temp(ctx);
6627 else
6628 temp_regs[1] = 0;
6629
6630 for (i = 0; i < lasti + 1; i++) {
6631 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6632 continue;
6633
6634 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6635 alu.op = ALU_OP3_MULADD;
6636 alu.is_op3 = 1;
6637 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6638 if (r)
6639 return r;
6640 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
6641 if (r)
6642 return r;
6643 alu.src[2].sel = ctx->temp_reg;
6644 alu.src[2].chan = i;
6645
6646 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6647 alu.dst.chan = i;
6648 if (i == lasti) {
6649 alu.last = 1;
6650 }
6651 r = r600_bytecode_add_alu(ctx->bc, &alu);
6652 if (r)
6653 return r;
6654 }
6655 return 0;
6656 }
6657
6658 static int tgsi_cmp(struct r600_shader_ctx *ctx)
6659 {
6660 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6661 struct r600_bytecode_alu alu;
6662 int i, r, j;
6663 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6664 int temp_regs[3];
6665
6666 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6667 temp_regs[j] = 0;
6668 if (ctx->src[j].abs)
6669 temp_regs[j] = r600_get_temp(ctx);
6670 }
6671
6672 for (i = 0; i < lasti + 1; i++) {
6673 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6674 continue;
6675
6676 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6677 alu.op = ALU_OP3_CNDGE;
6678 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6679 if (r)
6680 return r;
6681 r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
6682 if (r)
6683 return r;
6684 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
6685 if (r)
6686 return r;
6687 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6688 alu.dst.chan = i;
6689 alu.dst.write = 1;
6690 alu.is_op3 = 1;
6691 if (i == lasti)
6692 alu.last = 1;
6693 r = r600_bytecode_add_alu(ctx->bc, &alu);
6694 if (r)
6695 return r;
6696 }
6697 return 0;
6698 }
6699
6700 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
6701 {
6702 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6703 struct r600_bytecode_alu alu;
6704 int i, r;
6705 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6706
6707 for (i = 0; i < lasti + 1; i++) {
6708 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6709 continue;
6710
6711 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6712 alu.op = ALU_OP3_CNDE_INT;
6713 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6714 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6715 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6716 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6717 alu.dst.chan = i;
6718 alu.dst.write = 1;
6719 alu.is_op3 = 1;
6720 if (i == lasti)
6721 alu.last = 1;
6722 r = r600_bytecode_add_alu(ctx->bc, &alu);
6723 if (r)
6724 return r;
6725 }
6726 return 0;
6727 }
6728
6729 static int tgsi_xpd(struct r600_shader_ctx *ctx)
6730 {
6731 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6732 static const unsigned int src0_swizzle[] = {2, 0, 1};
6733 static const unsigned int src1_swizzle[] = {1, 2, 0};
6734 struct r600_bytecode_alu alu;
6735 uint32_t use_temp = 0;
6736 int i, r;
6737
6738 if (inst->Dst[0].Register.WriteMask != 0xf)
6739 use_temp = 1;
6740
6741 for (i = 0; i < 4; i++) {
6742 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6743 alu.op = ALU_OP2_MUL;
6744 if (i < 3) {
6745 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
6746 r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]);
6747 } else {
6748 alu.src[0].sel = V_SQ_ALU_SRC_0;
6749 alu.src[0].chan = i;
6750 alu.src[1].sel = V_SQ_ALU_SRC_0;
6751 alu.src[1].chan = i;
6752 }
6753
6754 alu.dst.sel = ctx->temp_reg;
6755 alu.dst.chan = i;
6756 alu.dst.write = 1;
6757
6758 if (i == 3)
6759 alu.last = 1;
6760 r = r600_bytecode_add_alu(ctx->bc, &alu);
6761 if (r)
6762 return r;
6763 }
6764
6765 for (i = 0; i < 4; i++) {
6766 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6767 alu.op = ALU_OP3_MULADD;
6768
6769 if (i < 3) {
6770 r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]);
6771 r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]);
6772 } else {
6773 alu.src[0].sel = V_SQ_ALU_SRC_0;
6774 alu.src[0].chan = i;
6775 alu.src[1].sel = V_SQ_ALU_SRC_0;
6776 alu.src[1].chan = i;
6777 }
6778
6779 alu.src[2].sel = ctx->temp_reg;
6780 alu.src[2].neg = 1;
6781 alu.src[2].chan = i;
6782
6783 if (use_temp)
6784 alu.dst.sel = ctx->temp_reg;
6785 else
6786 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6787 alu.dst.chan = i;
6788 alu.dst.write = 1;
6789 alu.is_op3 = 1;
6790 if (i == 3)
6791 alu.last = 1;
6792 r = r600_bytecode_add_alu(ctx->bc, &alu);
6793 if (r)
6794 return r;
6795 }
6796 if (use_temp)
6797 return tgsi_helper_copy(ctx, inst);
6798 return 0;
6799 }
6800
6801 static int tgsi_exp(struct r600_shader_ctx *ctx)
6802 {
6803 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6804 struct r600_bytecode_alu alu;
6805 int r;
6806 int i;
6807
6808 /* result.x = 2^floor(src); */
6809 if (inst->Dst[0].Register.WriteMask & 1) {
6810 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6811
6812 alu.op = ALU_OP1_FLOOR;
6813 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6814
6815 alu.dst.sel = ctx->temp_reg;
6816 alu.dst.chan = 0;
6817 alu.dst.write = 1;
6818 alu.last = 1;
6819 r = r600_bytecode_add_alu(ctx->bc, &alu);
6820 if (r)
6821 return r;
6822
6823 if (ctx->bc->chip_class == CAYMAN) {
6824 for (i = 0; i < 3; i++) {
6825 alu.op = ALU_OP1_EXP_IEEE;
6826 alu.src[0].sel = ctx->temp_reg;
6827 alu.src[0].chan = 0;
6828
6829 alu.dst.sel = ctx->temp_reg;
6830 alu.dst.chan = i;
6831 alu.dst.write = i == 0;
6832 alu.last = i == 2;
6833 r = r600_bytecode_add_alu(ctx->bc, &alu);
6834 if (r)
6835 return r;
6836 }
6837 } else {
6838 alu.op = ALU_OP1_EXP_IEEE;
6839 alu.src[0].sel = ctx->temp_reg;
6840 alu.src[0].chan = 0;
6841
6842 alu.dst.sel = ctx->temp_reg;
6843 alu.dst.chan = 0;
6844 alu.dst.write = 1;
6845 alu.last = 1;
6846 r = r600_bytecode_add_alu(ctx->bc, &alu);
6847 if (r)
6848 return r;
6849 }
6850 }
6851
6852 /* result.y = tmp - floor(tmp); */
6853 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6854 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6855
6856 alu.op = ALU_OP1_FRACT;
6857 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6858
6859 alu.dst.sel = ctx->temp_reg;
6860 #if 0
6861 r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6862 if (r)
6863 return r;
6864 #endif
6865 alu.dst.write = 1;
6866 alu.dst.chan = 1;
6867
6868 alu.last = 1;
6869
6870 r = r600_bytecode_add_alu(ctx->bc, &alu);
6871 if (r)
6872 return r;
6873 }
6874
6875 /* result.z = RoughApprox2ToX(tmp);*/
6876 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
6877 if (ctx->bc->chip_class == CAYMAN) {
6878 for (i = 0; i < 3; i++) {
6879 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6880 alu.op = ALU_OP1_EXP_IEEE;
6881 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6882
6883 alu.dst.sel = ctx->temp_reg;
6884 alu.dst.chan = i;
6885 if (i == 2) {
6886 alu.dst.write = 1;
6887 alu.last = 1;
6888 }
6889
6890 r = r600_bytecode_add_alu(ctx->bc, &alu);
6891 if (r)
6892 return r;
6893 }
6894 } else {
6895 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6896 alu.op = ALU_OP1_EXP_IEEE;
6897 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6898
6899 alu.dst.sel = ctx->temp_reg;
6900 alu.dst.write = 1;
6901 alu.dst.chan = 2;
6902
6903 alu.last = 1;
6904
6905 r = r600_bytecode_add_alu(ctx->bc, &alu);
6906 if (r)
6907 return r;
6908 }
6909 }
6910
6911 /* result.w = 1.0;*/
6912 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
6913 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6914
6915 alu.op = ALU_OP1_MOV;
6916 alu.src[0].sel = V_SQ_ALU_SRC_1;
6917 alu.src[0].chan = 0;
6918
6919 alu.dst.sel = ctx->temp_reg;
6920 alu.dst.chan = 3;
6921 alu.dst.write = 1;
6922 alu.last = 1;
6923 r = r600_bytecode_add_alu(ctx->bc, &alu);
6924 if (r)
6925 return r;
6926 }
6927 return tgsi_helper_copy(ctx, inst);
6928 }
6929
6930 static int tgsi_log(struct r600_shader_ctx *ctx)
6931 {
6932 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6933 struct r600_bytecode_alu alu;
6934 int r;
6935 int i;
6936
6937 /* result.x = floor(log2(|src|)); */
6938 if (inst->Dst[0].Register.WriteMask & 1) {
6939 if (ctx->bc->chip_class == CAYMAN) {
6940 for (i = 0; i < 3; i++) {
6941 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6942
6943 alu.op = ALU_OP1_LOG_IEEE;
6944 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6945 r600_bytecode_src_set_abs(&alu.src[0]);
6946
6947 alu.dst.sel = ctx->temp_reg;
6948 alu.dst.chan = i;
6949 if (i == 0)
6950 alu.dst.write = 1;
6951 if (i == 2)
6952 alu.last = 1;
6953 r = r600_bytecode_add_alu(ctx->bc, &alu);
6954 if (r)
6955 return r;
6956 }
6957
6958 } else {
6959 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6960
6961 alu.op = ALU_OP1_LOG_IEEE;
6962 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6963 r600_bytecode_src_set_abs(&alu.src[0]);
6964
6965 alu.dst.sel = ctx->temp_reg;
6966 alu.dst.chan = 0;
6967 alu.dst.write = 1;
6968 alu.last = 1;
6969 r = r600_bytecode_add_alu(ctx->bc, &alu);
6970 if (r)
6971 return r;
6972 }
6973
6974 alu.op = ALU_OP1_FLOOR;
6975 alu.src[0].sel = ctx->temp_reg;
6976 alu.src[0].chan = 0;
6977
6978 alu.dst.sel = ctx->temp_reg;
6979 alu.dst.chan = 0;
6980 alu.dst.write = 1;
6981 alu.last = 1;
6982
6983 r = r600_bytecode_add_alu(ctx->bc, &alu);
6984 if (r)
6985 return r;
6986 }
6987
6988 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
6989 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6990
6991 if (ctx->bc->chip_class == CAYMAN) {
6992 for (i = 0; i < 3; i++) {
6993 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6994
6995 alu.op = ALU_OP1_LOG_IEEE;
6996 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6997 r600_bytecode_src_set_abs(&alu.src[0]);
6998
6999 alu.dst.sel = ctx->temp_reg;
7000 alu.dst.chan = i;
7001 if (i == 1)
7002 alu.dst.write = 1;
7003 if (i == 2)
7004 alu.last = 1;
7005
7006 r = r600_bytecode_add_alu(ctx->bc, &alu);
7007 if (r)
7008 return r;
7009 }
7010 } else {
7011 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7012
7013 alu.op = ALU_OP1_LOG_IEEE;
7014 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7015 r600_bytecode_src_set_abs(&alu.src[0]);
7016
7017 alu.dst.sel = ctx->temp_reg;
7018 alu.dst.chan = 1;
7019 alu.dst.write = 1;
7020 alu.last = 1;
7021
7022 r = r600_bytecode_add_alu(ctx->bc, &alu);
7023 if (r)
7024 return r;
7025 }
7026
7027 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7028
7029 alu.op = ALU_OP1_FLOOR;
7030 alu.src[0].sel = ctx->temp_reg;
7031 alu.src[0].chan = 1;
7032
7033 alu.dst.sel = ctx->temp_reg;
7034 alu.dst.chan = 1;
7035 alu.dst.write = 1;
7036 alu.last = 1;
7037
7038 r = r600_bytecode_add_alu(ctx->bc, &alu);
7039 if (r)
7040 return r;
7041
7042 if (ctx->bc->chip_class == CAYMAN) {
7043 for (i = 0; i < 3; i++) {
7044 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7045 alu.op = ALU_OP1_EXP_IEEE;
7046 alu.src[0].sel = ctx->temp_reg;
7047 alu.src[0].chan = 1;
7048
7049 alu.dst.sel = ctx->temp_reg;
7050 alu.dst.chan = i;
7051 if (i == 1)
7052 alu.dst.write = 1;
7053 if (i == 2)
7054 alu.last = 1;
7055
7056 r = r600_bytecode_add_alu(ctx->bc, &alu);
7057 if (r)
7058 return r;
7059 }
7060 } else {
7061 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7062 alu.op = ALU_OP1_EXP_IEEE;
7063 alu.src[0].sel = ctx->temp_reg;
7064 alu.src[0].chan = 1;
7065
7066 alu.dst.sel = ctx->temp_reg;
7067 alu.dst.chan = 1;
7068 alu.dst.write = 1;
7069 alu.last = 1;
7070
7071 r = r600_bytecode_add_alu(ctx->bc, &alu);
7072 if (r)
7073 return r;
7074 }
7075
7076 if (ctx->bc->chip_class == CAYMAN) {
7077 for (i = 0; i < 3; i++) {
7078 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7079 alu.op = ALU_OP1_RECIP_IEEE;
7080 alu.src[0].sel = ctx->temp_reg;
7081 alu.src[0].chan = 1;
7082
7083 alu.dst.sel = ctx->temp_reg;
7084 alu.dst.chan = i;
7085 if (i == 1)
7086 alu.dst.write = 1;
7087 if (i == 2)
7088 alu.last = 1;
7089
7090 r = r600_bytecode_add_alu(ctx->bc, &alu);
7091 if (r)
7092 return r;
7093 }
7094 } else {
7095 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7096 alu.op = ALU_OP1_RECIP_IEEE;
7097 alu.src[0].sel = ctx->temp_reg;
7098 alu.src[0].chan = 1;
7099
7100 alu.dst.sel = ctx->temp_reg;
7101 alu.dst.chan = 1;
7102 alu.dst.write = 1;
7103 alu.last = 1;
7104
7105 r = r600_bytecode_add_alu(ctx->bc, &alu);
7106 if (r)
7107 return r;
7108 }
7109
7110 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7111
7112 alu.op = ALU_OP2_MUL;
7113
7114 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7115 r600_bytecode_src_set_abs(&alu.src[0]);
7116
7117 alu.src[1].sel = ctx->temp_reg;
7118 alu.src[1].chan = 1;
7119
7120 alu.dst.sel = ctx->temp_reg;
7121 alu.dst.chan = 1;
7122 alu.dst.write = 1;
7123 alu.last = 1;
7124
7125 r = r600_bytecode_add_alu(ctx->bc, &alu);
7126 if (r)
7127 return r;
7128 }
7129
7130 /* result.z = log2(|src|);*/
7131 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
7132 if (ctx->bc->chip_class == CAYMAN) {
7133 for (i = 0; i < 3; i++) {
7134 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7135
7136 alu.op = ALU_OP1_LOG_IEEE;
7137 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7138 r600_bytecode_src_set_abs(&alu.src[0]);
7139
7140 alu.dst.sel = ctx->temp_reg;
7141 if (i == 2)
7142 alu.dst.write = 1;
7143 alu.dst.chan = i;
7144 if (i == 2)
7145 alu.last = 1;
7146
7147 r = r600_bytecode_add_alu(ctx->bc, &alu);
7148 if (r)
7149 return r;
7150 }
7151 } else {
7152 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7153
7154 alu.op = ALU_OP1_LOG_IEEE;
7155 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7156 r600_bytecode_src_set_abs(&alu.src[0]);
7157
7158 alu.dst.sel = ctx->temp_reg;
7159 alu.dst.write = 1;
7160 alu.dst.chan = 2;
7161 alu.last = 1;
7162
7163 r = r600_bytecode_add_alu(ctx->bc, &alu);
7164 if (r)
7165 return r;
7166 }
7167 }
7168
7169 /* result.w = 1.0; */
7170 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
7171 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7172
7173 alu.op = ALU_OP1_MOV;
7174 alu.src[0].sel = V_SQ_ALU_SRC_1;
7175 alu.src[0].chan = 0;
7176
7177 alu.dst.sel = ctx->temp_reg;
7178 alu.dst.chan = 3;
7179 alu.dst.write = 1;
7180 alu.last = 1;
7181
7182 r = r600_bytecode_add_alu(ctx->bc, &alu);
7183 if (r)
7184 return r;
7185 }
7186
7187 return tgsi_helper_copy(ctx, inst);
7188 }
7189
7190 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
7191 {
7192 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7193 struct r600_bytecode_alu alu;
7194 int r;
7195 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7196 unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
7197
7198 assert(inst->Dst[0].Register.Index < 3);
7199 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7200
7201 switch (inst->Instruction.Opcode) {
7202 case TGSI_OPCODE_ARL:
7203 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
7204 break;
7205 case TGSI_OPCODE_ARR:
7206 alu.op = ALU_OP1_FLT_TO_INT;
7207 break;
7208 case TGSI_OPCODE_UARL:
7209 alu.op = ALU_OP1_MOV;
7210 break;
7211 default:
7212 assert(0);
7213 return -1;
7214 }
7215
7216 for (i = 0; i <= lasti; ++i) {
7217 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7218 continue;
7219 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7220 alu.last = i == lasti;
7221 alu.dst.sel = reg;
7222 alu.dst.chan = i;
7223 alu.dst.write = 1;
7224 r = r600_bytecode_add_alu(ctx->bc, &alu);
7225 if (r)
7226 return r;
7227 }
7228
7229 if (inst->Dst[0].Register.Index > 0)
7230 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
7231 else
7232 ctx->bc->ar_loaded = 0;
7233
7234 return 0;
7235 }
7236 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
7237 {
7238 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7239 struct r600_bytecode_alu alu;
7240 int r;
7241 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7242
7243 switch (inst->Instruction.Opcode) {
7244 case TGSI_OPCODE_ARL:
7245 memset(&alu, 0, sizeof(alu));
7246 alu.op = ALU_OP1_FLOOR;
7247 alu.dst.sel = ctx->bc->ar_reg;
7248 alu.dst.write = 1;
7249 for (i = 0; i <= lasti; ++i) {
7250 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7251 alu.dst.chan = i;
7252 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7253 alu.last = i == lasti;
7254 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7255 return r;
7256 }
7257 }
7258
7259 memset(&alu, 0, sizeof(alu));
7260 alu.op = ALU_OP1_FLT_TO_INT;
7261 alu.src[0].sel = ctx->bc->ar_reg;
7262 alu.dst.sel = ctx->bc->ar_reg;
7263 alu.dst.write = 1;
7264 /* FLT_TO_INT is trans-only on r600/r700 */
7265 alu.last = TRUE;
7266 for (i = 0; i <= lasti; ++i) {
7267 alu.dst.chan = i;
7268 alu.src[0].chan = i;
7269 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7270 return r;
7271 }
7272 break;
7273 case TGSI_OPCODE_ARR:
7274 memset(&alu, 0, sizeof(alu));
7275 alu.op = ALU_OP1_FLT_TO_INT;
7276 alu.dst.sel = ctx->bc->ar_reg;
7277 alu.dst.write = 1;
7278 /* FLT_TO_INT is trans-only on r600/r700 */
7279 alu.last = TRUE;
7280 for (i = 0; i <= lasti; ++i) {
7281 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7282 alu.dst.chan = i;
7283 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7284 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7285 return r;
7286 }
7287 }
7288 break;
7289 case TGSI_OPCODE_UARL:
7290 memset(&alu, 0, sizeof(alu));
7291 alu.op = ALU_OP1_MOV;
7292 alu.dst.sel = ctx->bc->ar_reg;
7293 alu.dst.write = 1;
7294 for (i = 0; i <= lasti; ++i) {
7295 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7296 alu.dst.chan = i;
7297 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7298 alu.last = i == lasti;
7299 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7300 return r;
7301 }
7302 }
7303 break;
7304 default:
7305 assert(0);
7306 return -1;
7307 }
7308
7309 ctx->bc->ar_loaded = 0;
7310 return 0;
7311 }
7312
7313 static int tgsi_opdst(struct r600_shader_ctx *ctx)
7314 {
7315 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7316 struct r600_bytecode_alu alu;
7317 int i, r = 0;
7318
7319 for (i = 0; i < 4; i++) {
7320 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7321
7322 alu.op = ALU_OP2_MUL;
7323 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7324
7325 if (i == 0 || i == 3) {
7326 alu.src[0].sel = V_SQ_ALU_SRC_1;
7327 } else {
7328 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7329 }
7330
7331 if (i == 0 || i == 2) {
7332 alu.src[1].sel = V_SQ_ALU_SRC_1;
7333 } else {
7334 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
7335 }
7336 if (i == 3)
7337 alu.last = 1;
7338 r = r600_bytecode_add_alu(ctx->bc, &alu);
7339 if (r)
7340 return r;
7341 }
7342 return 0;
7343 }
7344
7345 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
7346 {
7347 struct r600_bytecode_alu alu;
7348 int r;
7349
7350 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7351 alu.op = opcode;
7352 alu.execute_mask = 1;
7353 alu.update_pred = 1;
7354
7355 alu.dst.sel = ctx->temp_reg;
7356 alu.dst.write = 1;
7357 alu.dst.chan = 0;
7358
7359 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7360 alu.src[1].sel = V_SQ_ALU_SRC_0;
7361 alu.src[1].chan = 0;
7362
7363 alu.last = 1;
7364
7365 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
7366 if (r)
7367 return r;
7368 return 0;
7369 }
7370
7371 static int pops(struct r600_shader_ctx *ctx, int pops)
7372 {
7373 unsigned force_pop = ctx->bc->force_add_cf;
7374
7375 if (!force_pop) {
7376 int alu_pop = 3;
7377 if (ctx->bc->cf_last) {
7378 if (ctx->bc->cf_last->op == CF_OP_ALU)
7379 alu_pop = 0;
7380 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
7381 alu_pop = 1;
7382 }
7383 alu_pop += pops;
7384 if (alu_pop == 1) {
7385 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
7386 ctx->bc->force_add_cf = 1;
7387 } else if (alu_pop == 2) {
7388 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
7389 ctx->bc->force_add_cf = 1;
7390 } else {
7391 force_pop = 1;
7392 }
7393 }
7394
7395 if (force_pop) {
7396 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
7397 ctx->bc->cf_last->pop_count = pops;
7398 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7399 }
7400
7401 return 0;
7402 }
7403
7404 static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
7405 unsigned reason)
7406 {
7407 struct r600_stack_info *stack = &ctx->bc->stack;
7408 unsigned elements, entries;
7409
7410 unsigned entry_size = stack->entry_size;
7411
7412 elements = (stack->loop + stack->push_wqm ) * entry_size;
7413 elements += stack->push;
7414
7415 switch (ctx->bc->chip_class) {
7416 case R600:
7417 case R700:
7418 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
7419 * the stack must be reserved to hold the current active/continue
7420 * masks */
7421 if (reason == FC_PUSH_VPM) {
7422 elements += 2;
7423 }
7424 break;
7425
7426 case CAYMAN:
7427 /* r9xx: any stack operation on empty stack consumes 2 additional
7428 * elements */
7429 elements += 2;
7430
7431 /* fallthrough */
7432 /* FIXME: do the two elements added above cover the cases for the
7433 * r8xx+ below? */
7434
7435 case EVERGREEN:
7436 /* r8xx+: 2 extra elements are not always required, but one extra
7437 * element must be added for each of the following cases:
7438 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
7439 * stack usage.
7440 * (Currently we don't use ALU_ELSE_AFTER.)
7441 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
7442 * PUSH instruction executed.
7443 *
7444 * NOTE: it seems we also need to reserve additional element in some
7445 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
7446 * then STACK_SIZE should be 2 instead of 1 */
7447 if (reason == FC_PUSH_VPM) {
7448 elements += 1;
7449 }
7450 break;
7451
7452 default:
7453 assert(0);
7454 break;
7455 }
7456
7457 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
7458 * for all chips, so we use 4 in the final formula, not the real entry_size
7459 * for the chip */
7460 entry_size = 4;
7461
7462 entries = (elements + (entry_size - 1)) / entry_size;
7463
7464 if (entries > stack->max_entries)
7465 stack->max_entries = entries;
7466 }
7467
7468 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
7469 {
7470 switch(reason) {
7471 case FC_PUSH_VPM:
7472 --ctx->bc->stack.push;
7473 assert(ctx->bc->stack.push >= 0);
7474 break;
7475 case FC_PUSH_WQM:
7476 --ctx->bc->stack.push_wqm;
7477 assert(ctx->bc->stack.push_wqm >= 0);
7478 break;
7479 case FC_LOOP:
7480 --ctx->bc->stack.loop;
7481 assert(ctx->bc->stack.loop >= 0);
7482 break;
7483 default:
7484 assert(0);
7485 break;
7486 }
7487 }
7488
7489 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
7490 {
7491 switch (reason) {
7492 case FC_PUSH_VPM:
7493 ++ctx->bc->stack.push;
7494 break;
7495 case FC_PUSH_WQM:
7496 ++ctx->bc->stack.push_wqm;
7497 case FC_LOOP:
7498 ++ctx->bc->stack.loop;
7499 break;
7500 default:
7501 assert(0);
7502 }
7503
7504 callstack_update_max_depth(ctx, reason);
7505 }
7506
7507 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
7508 {
7509 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
7510
7511 sp->mid = realloc((void *)sp->mid,
7512 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
7513 sp->mid[sp->num_mid] = ctx->bc->cf_last;
7514 sp->num_mid++;
7515 }
7516
7517 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
7518 {
7519 ctx->bc->fc_sp++;
7520 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
7521 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
7522 }
7523
7524 static void fc_poplevel(struct r600_shader_ctx *ctx)
7525 {
7526 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
7527 free(sp->mid);
7528 sp->mid = NULL;
7529 sp->num_mid = 0;
7530 sp->start = NULL;
7531 sp->type = 0;
7532 ctx->bc->fc_sp--;
7533 }
7534
#if 0
/* NOTE(review): dead scaffolding for subroutine RET/flag-based control
 * flow; never compiled (the stray ')' in the r600_bytecode_add_cfinst
 * calls would not build if enabled). Kept for reference only. */
static int emit_return(struct r600_shader_ctx *ctx)
{
	r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN));
	return 0;
}

static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
{

	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP));
	ctx->bc->cf_last->pop_count = pops;
	/* XXX work out offset */
	return 0;
}

static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
{
	return 0;
}

static void emit_testflag(struct r600_shader_ctx *ctx)
{

}

static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
{
	emit_testflag(ctx);
	emit_jump_to_offset(ctx, 1, 4);
	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
	pops(ctx, ifidx + 1);
	emit_return(ctx);
}

static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
{
	emit_testflag(ctx);

	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
	ctx->bc->cf_last->pop_count = 1;

	fc_set_mid(ctx, fc_sp);

	pops(ctx, 1);
}
#endif
7582
7583 static int emit_if(struct r600_shader_ctx *ctx, int opcode)
7584 {
7585 int alu_type = CF_OP_ALU_PUSH_BEFORE;
7586
7587 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
7588 * LOOP_STARTxxx for nested loops may put the branch stack into a state
7589 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
7590 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
7591 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
7592 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
7593 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7594 alu_type = CF_OP_ALU;
7595 }
7596
7597 emit_logic_pred(ctx, opcode, alu_type);
7598
7599 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
7600
7601 fc_pushlevel(ctx, FC_IF);
7602
7603 callstack_push(ctx, FC_PUSH_VPM);
7604 return 0;
7605 }
7606
7607 static int tgsi_if(struct r600_shader_ctx *ctx)
7608 {
7609 return emit_if(ctx, ALU_OP2_PRED_SETNE);
7610 }
7611
7612 static int tgsi_uif(struct r600_shader_ctx *ctx)
7613 {
7614 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
7615 }
7616
7617 static int tgsi_else(struct r600_shader_ctx *ctx)
7618 {
7619 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
7620 ctx->bc->cf_last->pop_count = 1;
7621
7622 fc_set_mid(ctx, ctx->bc->fc_sp);
7623 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
7624 return 0;
7625 }
7626
7627 static int tgsi_endif(struct r600_shader_ctx *ctx)
7628 {
7629 pops(ctx, 1);
7630 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
7631 R600_ERR("if/endif unbalanced in shader\n");
7632 return -1;
7633 }
7634
7635 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
7636 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7637 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
7638 } else {
7639 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
7640 }
7641 fc_poplevel(ctx);
7642
7643 callstack_pop(ctx, FC_PUSH_VPM);
7644 return 0;
7645 }
7646
7647 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
7648 {
7649 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
7650 * limited to 4096 iterations, like the other LOOP_* instructions. */
7651 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
7652
7653 fc_pushlevel(ctx, FC_LOOP);
7654
7655 /* check stack depth */
7656 callstack_push(ctx, FC_LOOP);
7657 return 0;
7658 }
7659
7660 static int tgsi_endloop(struct r600_shader_ctx *ctx)
7661 {
7662 int i;
7663
7664 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
7665
7666 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
7667 R600_ERR("loop/endloop in shader code are not paired.\n");
7668 return -EINVAL;
7669 }
7670
7671 /* fixup loop pointers - from r600isa
7672 LOOP END points to CF after LOOP START,
7673 LOOP START point to CF after LOOP END
7674 BRK/CONT point to LOOP END CF
7675 */
7676 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
7677
7678 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7679
7680 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
7681 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
7682 }
7683 /* XXX add LOOPRET support */
7684 fc_poplevel(ctx);
7685 callstack_pop(ctx, FC_LOOP);
7686 return 0;
7687 }
7688
7689 static int tgsi_loop_breakc(struct r600_shader_ctx *ctx)
7690 {
7691 int r;
7692 unsigned int fscp;
7693
7694 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
7695 {
7696 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
7697 break;
7698 }
7699 if (fscp == 0) {
7700 R600_ERR("BREAKC not inside loop/endloop pair\n");
7701 return -EINVAL;
7702 }
7703
7704 if (ctx->bc->chip_class == EVERGREEN &&
7705 ctx->bc->family != CHIP_CYPRESS &&
7706 ctx->bc->family != CHIP_JUNIPER) {
7707 /* HW bug: ALU_BREAK does not save the active mask correctly */
7708 r = tgsi_uif(ctx);
7709 if (r)
7710 return r;
7711
7712 r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_BREAK);
7713 if (r)
7714 return r;
7715 fc_set_mid(ctx, fscp);
7716
7717 return tgsi_endif(ctx);
7718 } else {
7719 r = emit_logic_pred(ctx, ALU_OP2_PRED_SETE_INT, CF_OP_ALU_BREAK);
7720 if (r)
7721 return r;
7722 fc_set_mid(ctx, fscp);
7723 }
7724
7725 return 0;
7726 }
7727
7728 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
7729 {
7730 unsigned int fscp;
7731
7732 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
7733 {
7734 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
7735 break;
7736 }
7737
7738 if (fscp == 0) {
7739 R600_ERR("Break not inside loop/endloop pair\n");
7740 return -EINVAL;
7741 }
7742
7743 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7744
7745 fc_set_mid(ctx, fscp);
7746
7747 return 0;
7748 }
7749
7750 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
7751 {
7752 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7753 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
7754 int r;
7755
7756 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
7757 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
7758
7759 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7760 if (!r)
7761 ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
7762 return r;
7763 }
7764
7765 static int tgsi_umad(struct r600_shader_ctx *ctx)
7766 {
7767 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7768 struct r600_bytecode_alu alu;
7769 int i, j, k, r;
7770 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7771
7772 /* src0 * src1 */
7773 for (i = 0; i < lasti + 1; i++) {
7774 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7775 continue;
7776
7777 if (ctx->bc->chip_class == CAYMAN) {
7778 for (j = 0 ; j < 4; j++) {
7779 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7780
7781 alu.op = ALU_OP2_MULLO_UINT;
7782 for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
7783 r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
7784 }
7785 alu.dst.chan = j;
7786 alu.dst.sel = ctx->temp_reg;
7787 alu.dst.write = (j == i);
7788 if (j == 3)
7789 alu.last = 1;
7790 r = r600_bytecode_add_alu(ctx->bc, &alu);
7791 if (r)
7792 return r;
7793 }
7794 } else {
7795 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7796
7797 alu.dst.chan = i;
7798 alu.dst.sel = ctx->temp_reg;
7799 alu.dst.write = 1;
7800
7801 alu.op = ALU_OP2_MULLO_UINT;
7802 for (j = 0; j < 2; j++) {
7803 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
7804 }
7805
7806 alu.last = 1;
7807 r = r600_bytecode_add_alu(ctx->bc, &alu);
7808 if (r)
7809 return r;
7810 }
7811 }
7812
7813
7814 for (i = 0; i < lasti + 1; i++) {
7815 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7816 continue;
7817
7818 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7819 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7820
7821 alu.op = ALU_OP2_ADD_INT;
7822
7823 alu.src[0].sel = ctx->temp_reg;
7824 alu.src[0].chan = i;
7825
7826 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
7827 if (i == lasti) {
7828 alu.last = 1;
7829 }
7830 r = r600_bytecode_add_alu(ctx->bc, &alu);
7831 if (r)
7832 return r;
7833 }
7834 return 0;
7835 }
7836
/* Dispatch table for R600/R700 (pre-Evergreen), indexed by TGSI_OPCODE_*.
 * Each entry pairs the hardware op (ALU/CF/FETCH) with the handler that
 * emits it; opcodes the chip cannot do map to tgsi_unsupported.  Bare
 * numeric indices are gaps in the TGSI opcode space. */
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL]	= { ALU_OP0_NOP, tgsi_r600_arl},
	[TGSI_OPCODE_MOV]	= { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT]	= { ALU_OP0_NOP, tgsi_lit},

	/* XXX:
	 * For state trackers other than OpenGL, we'll want to use
	 * _RECIP_IEEE instead.
	 */
	[TGSI_OPCODE_RCP]	= { ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},

	[TGSI_OPCODE_RSQ]	= { ALU_OP0_NOP, tgsi_rsq},
	[TGSI_OPCODE_EXP]	= { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG]	= { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL]	= { ALU_OP2_MUL, tgsi_op2},
	[TGSI_OPCODE_ADD]	= { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DP4]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DST]	= { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN]	= { ALU_OP2_MIN, tgsi_op2},
	[TGSI_OPCODE_MAX]	= { ALU_OP2_MAX, tgsi_op2},
	[TGSI_OPCODE_SLT]	= { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE]	= { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD]	= { ALU_OP3_MULADD, tgsi_op3},
	[TGSI_OPCODE_SUB]	= { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_LRP]	= { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT]	= { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_DP2A]	= { ALU_OP0_NOP, tgsi_unsupported},
	[22]			= { ALU_OP0_NOP, tgsi_unsupported},
	[23]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC]	= { ALU_OP1_FRACT, tgsi_op2},
	[TGSI_OPCODE_CLAMP]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR]	= { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND]	= { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2]	= { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2]	= { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW]	= { ALU_OP0_NOP, tgsi_pow},
	[TGSI_OPCODE_XPD]	= { ALU_OP0_NOP, tgsi_xpd},
	[32]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ABS]	= { ALU_OP1_MOV, tgsi_op2},
	[34]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DPH]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_COS]	= { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX]	= { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY]	= { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL]	= { ALU_OP2_KILLGT, tgsi_kill},  /* unconditional kill */
	[TGSI_OPCODE_PK2H]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[44]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ]	= { ALU_OP2_SETE, tgsi_op2},
	[46]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT]	= { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN]	= { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE]	= { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE]	= { ALU_OP2_SETNE, tgsi_op2},
	[51]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD]	= { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[59]			= { ALU_OP0_NOP, tgsi_unsupported},
	[60]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR]	= { ALU_OP0_NOP, tgsi_r600_arl},
	[62]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG]	= { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP]	= { ALU_OP0_NOP, tgsi_cmp},
	[TGSI_OPCODE_SCS]	= { ALU_OP0_NOP, tgsi_scs},
	[TGSI_OPCODE_TXB]	= { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_TXL]	= { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK]	= { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF]	= { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF]	= { ALU_OP0_NOP, tgsi_uif},
	[76]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE]	= { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF]	= { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DDY_FINE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PUSHA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_POPA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL]	= { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F]	= { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT]	= { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC]	= { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL]	= { ALU_OP2_LSHL_INT, tgsi_op2_trans},
	[88]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND]	= { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR]	= { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD]	= { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR]	= { ALU_OP2_XOR_INT, tgsi_op2},
	[TGSI_OPCODE_SAD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF]	= { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ]	= { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT]	= { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT]	= { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM]	= { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP]	= { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP]	= { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXQ_LZ]	= { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS]	= { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[105]			= { ALU_OP0_NOP, tgsi_unsupported},
	[106]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ]	= { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE]	= { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT]	= { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE]	= { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[112]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CALLNZ]	= { ALU_OP0_NOP, tgsi_unsupported},
	[114]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREAKC]	= { ALU_OP0_NOP, tgsi_loop_breakc},
	[TGSI_OPCODE_KILL_IF]	= { ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
	[TGSI_OPCODE_END]	= { ALU_OP0_NOP, tgsi_end},  /* aka HALT */
	[118]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I]	= { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
	[TGSI_OPCODE_IDIV]	= { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX]	= { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN]	= { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG]	= { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE]	= { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR]	= { ALU_OP2_ASHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_ISLT]	= { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U]	= { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_U2F]	= { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD]	= { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV]	= { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD]	= { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX]	= { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN]	= { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD]	= { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL]	= { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ]	= { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE]	= { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR]	= { ALU_OP2_LSHR_INT, tgsi_op2_trans},
	[TGSI_OPCODE_USLT]	= { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE]	= { ALU_OP2_SETNE_INT, tgsi_op2_swap},
	[TGSI_OPCODE_SWITCH]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL]	= { ALU_OP1_MOVA_INT, tgsi_r600_arl},
	[TGSI_OPCODE_UCMP]	= { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS]	= { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG]	= { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_MFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2]	= { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2]	= { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI]	= { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI]	= { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4]	= { FETCH_OP_GATHER4, tgsi_unsupported},
	[TGSI_OPCODE_LODQ]	= { FETCH_OP_GET_LOD, tgsi_unsupported},
	[TGSI_OPCODE_IBFE]	= { ALU_OP3_BFE_INT, tgsi_unsupported},
	[TGSI_OPCODE_UBFE]	= { ALU_OP3_BFE_UINT, tgsi_unsupported},
	[TGSI_OPCODE_BFI]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREV]	= { ALU_OP1_BFREV_INT, tgsi_unsupported},
	[TGSI_OPCODE_POPC]	= { ALU_OP1_BCNT_INT, tgsi_unsupported},
	[TGSI_OPCODE_LSB]	= { ALU_OP1_FFBL_INT, tgsi_unsupported},
	[TGSI_OPCODE_IMSB]	= { ALU_OP1_FFBH_INT, tgsi_unsupported},
	[TGSI_OPCODE_UMSB]	= { ALU_OP1_FFBH_UINT, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_CENTROID]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_SAMPLE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_INTERP_OFFSET]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LAST]	= { ALU_OP0_NOP, tgsi_unsupported},
};
8041
/* Dispatch table for Evergreen, indexed by TGSI_OPCODE_*.  Differs from
 * the R600 table mainly in: IEEE RCP/RSQ, native integer shifts
 * (tgsi_op2 instead of tgsi_op2_trans), TG4/LODQ/bitfield ops wired up,
 * fine derivatives, interpolation opcodes, and the FP64 opcode group at
 * the end.  Bare numeric indices are gaps in the TGSI opcode space. */
static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
	[TGSI_OPCODE_ARL]	= { ALU_OP0_NOP, tgsi_eg_arl},
	[TGSI_OPCODE_MOV]	= { ALU_OP1_MOV, tgsi_op2},
	[TGSI_OPCODE_LIT]	= { ALU_OP0_NOP, tgsi_lit},
	[TGSI_OPCODE_RCP]	= { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_RSQ]	= { ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
	[TGSI_OPCODE_EXP]	= { ALU_OP0_NOP, tgsi_exp},
	[TGSI_OPCODE_LOG]	= { ALU_OP0_NOP, tgsi_log},
	[TGSI_OPCODE_MUL]	= { ALU_OP2_MUL, tgsi_op2},
	[TGSI_OPCODE_ADD]	= { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_DP3]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DP4]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_DST]	= { ALU_OP0_NOP, tgsi_opdst},
	[TGSI_OPCODE_MIN]	= { ALU_OP2_MIN, tgsi_op2},
	[TGSI_OPCODE_MAX]	= { ALU_OP2_MAX, tgsi_op2},
	[TGSI_OPCODE_SLT]	= { ALU_OP2_SETGT, tgsi_op2_swap},
	[TGSI_OPCODE_SGE]	= { ALU_OP2_SETGE, tgsi_op2},
	[TGSI_OPCODE_MAD]	= { ALU_OP3_MULADD, tgsi_op3},
	[TGSI_OPCODE_SUB]	= { ALU_OP2_ADD, tgsi_op2},
	[TGSI_OPCODE_LRP]	= { ALU_OP0_NOP, tgsi_lrp},
	[TGSI_OPCODE_FMA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SQRT]	= { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_DP2A]	= { ALU_OP0_NOP, tgsi_unsupported},
	[22]			= { ALU_OP0_NOP, tgsi_unsupported},
	[23]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FRC]	= { ALU_OP1_FRACT, tgsi_op2},
	[TGSI_OPCODE_CLAMP]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FLR]	= { ALU_OP1_FLOOR, tgsi_op2},
	[TGSI_OPCODE_ROUND]	= { ALU_OP1_RNDNE, tgsi_op2},
	[TGSI_OPCODE_EX2]	= { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_LG2]	= { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
	[TGSI_OPCODE_POW]	= { ALU_OP0_NOP, tgsi_pow},
	[TGSI_OPCODE_XPD]	= { ALU_OP0_NOP, tgsi_xpd},
	[32]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ABS]	= { ALU_OP1_MOV, tgsi_op2},
	[34]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DPH]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_COS]	= { ALU_OP1_COS, tgsi_trig},
	[TGSI_OPCODE_DDX]	= { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY]	= { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_KILL]	= { ALU_OP2_KILLGT, tgsi_kill},  /* unconditional kill */
	[TGSI_OPCODE_PK2H]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK2US]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4B]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_PK4UB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[44]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SEQ]	= { ALU_OP2_SETE, tgsi_op2},
	[46]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SGT]	= { ALU_OP2_SETGT, tgsi_op2},
	[TGSI_OPCODE_SIN]	= { ALU_OP1_SIN, tgsi_trig},
	[TGSI_OPCODE_SLE]	= { ALU_OP2_SETGE, tgsi_op2_swap},
	[TGSI_OPCODE_SNE]	= { ALU_OP2_SETNE, tgsi_op2},
	[51]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXD]	= { FETCH_OP_SAMPLE_G, tgsi_tex},
	[TGSI_OPCODE_TXP]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_UP2H]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP2US]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4B]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_UP4UB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[59]			= { ALU_OP0_NOP, tgsi_unsupported},
	[60]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ARR]	= { ALU_OP0_NOP, tgsi_eg_arl},
	[62]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CAL]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_RET]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SSG]	= { ALU_OP0_NOP, tgsi_ssg},
	[TGSI_OPCODE_CMP]	= { ALU_OP0_NOP, tgsi_cmp},
	[TGSI_OPCODE_SCS]	= { ALU_OP0_NOP, tgsi_scs},
	[TGSI_OPCODE_TXB]	= { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[69]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DIV]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DP2]	= { ALU_OP2_DOT4, tgsi_dp},
	[TGSI_OPCODE_TXL]	= { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_BRK]	= { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
	[TGSI_OPCODE_IF]	= { ALU_OP0_NOP, tgsi_if},
	[TGSI_OPCODE_UIF]	= { ALU_OP0_NOP, tgsi_uif},
	[76]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ELSE]	= { ALU_OP0_NOP, tgsi_else},
	[TGSI_OPCODE_ENDIF]	= { ALU_OP0_NOP, tgsi_endif},
	[TGSI_OPCODE_DDX_FINE]	= { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
	[TGSI_OPCODE_DDY_FINE]	= { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
	[TGSI_OPCODE_PUSHA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_POPA]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CEIL]	= { ALU_OP1_CEIL, tgsi_op2},
	[TGSI_OPCODE_I2F]	= { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_NOT]	= { ALU_OP1_NOT_INT, tgsi_op2},
	[TGSI_OPCODE_TRUNC]	= { ALU_OP1_TRUNC, tgsi_op2},
	[TGSI_OPCODE_SHL]	= { ALU_OP2_LSHL_INT, tgsi_op2},
	[88]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_AND]	= { ALU_OP2_AND_INT, tgsi_op2},
	[TGSI_OPCODE_OR]	= { ALU_OP2_OR_INT, tgsi_op2},
	[TGSI_OPCODE_MOD]	= { ALU_OP0_NOP, tgsi_imod},
	[TGSI_OPCODE_XOR]	= { ALU_OP2_XOR_INT, tgsi_op2},
	[TGSI_OPCODE_SAD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXF]	= { FETCH_OP_LD, tgsi_tex},
	[TGSI_OPCODE_TXQ]	= { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_CONT]	= { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
	[TGSI_OPCODE_EMIT]	= { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_ENDPRIM]	= { CF_OP_CUT_VERTEX, tgsi_gs_emit},
	[TGSI_OPCODE_BGNLOOP]	= { ALU_OP0_NOP, tgsi_bgnloop},
	[TGSI_OPCODE_BGNSUB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDLOOP]	= { ALU_OP0_NOP, tgsi_endloop},
	[TGSI_OPCODE_ENDSUB]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TXQ_LZ]	= { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
	[TGSI_OPCODE_TXQS]	= { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
	[105]			= { ALU_OP0_NOP, tgsi_unsupported},
	[106]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_NOP]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_FSEQ]	= { ALU_OP2_SETE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSGE]	= { ALU_OP2_SETGE_DX10, tgsi_op2},
	[TGSI_OPCODE_FSLT]	= { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
	[TGSI_OPCODE_FSNE]	= { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
	[112]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CALLNZ]	= { ALU_OP0_NOP, tgsi_unsupported},
	[114]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BREAKC]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_KILL_IF]	= { ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
	[TGSI_OPCODE_END]	= { ALU_OP0_NOP, tgsi_end},  /* aka HALT */
	[118]			= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_F2I]	= { ALU_OP1_FLT_TO_INT, tgsi_f2i},
	[TGSI_OPCODE_IDIV]	= { ALU_OP0_NOP, tgsi_idiv},
	[TGSI_OPCODE_IMAX]	= { ALU_OP2_MAX_INT, tgsi_op2},
	[TGSI_OPCODE_IMIN]	= { ALU_OP2_MIN_INT, tgsi_op2},
	[TGSI_OPCODE_INEG]	= { ALU_OP2_SUB_INT, tgsi_ineg},
	[TGSI_OPCODE_ISGE]	= { ALU_OP2_SETGE_INT, tgsi_op2},
	[TGSI_OPCODE_ISHR]	= { ALU_OP2_ASHR_INT, tgsi_op2},
	[TGSI_OPCODE_ISLT]	= { ALU_OP2_SETGT_INT, tgsi_op2_swap},
	[TGSI_OPCODE_F2U]	= { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
	[TGSI_OPCODE_U2F]	= { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
	[TGSI_OPCODE_UADD]	= { ALU_OP2_ADD_INT, tgsi_op2},
	[TGSI_OPCODE_UDIV]	= { ALU_OP0_NOP, tgsi_udiv},
	[TGSI_OPCODE_UMAD]	= { ALU_OP0_NOP, tgsi_umad},
	[TGSI_OPCODE_UMAX]	= { ALU_OP2_MAX_UINT, tgsi_op2},
	[TGSI_OPCODE_UMIN]	= { ALU_OP2_MIN_UINT, tgsi_op2},
	[TGSI_OPCODE_UMOD]	= { ALU_OP0_NOP, tgsi_umod},
	[TGSI_OPCODE_UMUL]	= { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_USEQ]	= { ALU_OP2_SETE_INT, tgsi_op2},
	[TGSI_OPCODE_USGE]	= { ALU_OP2_SETGE_UINT, tgsi_op2},
	[TGSI_OPCODE_USHR]	= { ALU_OP2_LSHR_INT, tgsi_op2},
	[TGSI_OPCODE_USLT]	= { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
	[TGSI_OPCODE_USNE]	= { ALU_OP2_SETNE_INT, tgsi_op2},
	[TGSI_OPCODE_SWITCH]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_CASE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_DEFAULT]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ENDSWITCH]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_I_MS]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_B]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_C_LZ]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_D]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_L]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_GATHER4]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SVIEWINFO]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_POS]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_SAMPLE_INFO]	= { 0, tgsi_unsupported},
	[TGSI_OPCODE_UARL]	= { ALU_OP1_MOVA_INT, tgsi_eg_arl},
	[TGSI_OPCODE_UCMP]	= { ALU_OP0_NOP, tgsi_ucmp},
	[TGSI_OPCODE_IABS]	= { 0, tgsi_iabs},
	[TGSI_OPCODE_ISSG]	= { 0, tgsi_issg},
	[TGSI_OPCODE_LOAD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_STORE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_MFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_LFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_SFENCE]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_BARRIER]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUADD]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXCHG]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMCAS]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMAND]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMOR]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMXOR]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMIN]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMUMAX]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMIN]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_ATOMIMAX]	= { ALU_OP0_NOP, tgsi_unsupported},
	[TGSI_OPCODE_TEX2]	= { FETCH_OP_SAMPLE, tgsi_tex},
	[TGSI_OPCODE_TXB2]	= { FETCH_OP_SAMPLE_LB, tgsi_tex},
	[TGSI_OPCODE_TXL2]	= { FETCH_OP_SAMPLE_L, tgsi_tex},
	[TGSI_OPCODE_IMUL_HI]	= { ALU_OP2_MULHI_INT, tgsi_op2_trans},
	[TGSI_OPCODE_UMUL_HI]	= { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
	[TGSI_OPCODE_TG4]	= { FETCH_OP_GATHER4, tgsi_tex},
	[TGSI_OPCODE_LODQ]	= { FETCH_OP_GET_LOD, tgsi_tex},
	[TGSI_OPCODE_IBFE]	= { ALU_OP3_BFE_INT, tgsi_op3},
	[TGSI_OPCODE_UBFE]	= { ALU_OP3_BFE_UINT, tgsi_op3},
	[TGSI_OPCODE_BFI]	= { ALU_OP0_NOP, tgsi_bfi},
	[TGSI_OPCODE_BREV]	= { ALU_OP1_BFREV_INT, tgsi_op2},
	[TGSI_OPCODE_POPC]	= { ALU_OP1_BCNT_INT, tgsi_op2},
	[TGSI_OPCODE_LSB]	= { ALU_OP1_FFBL_INT, tgsi_op2},
	[TGSI_OPCODE_IMSB]	= { ALU_OP1_FFBH_INT, tgsi_msb},
	[TGSI_OPCODE_UMSB]	= { ALU_OP1_FFBH_UINT, tgsi_msb},
	[TGSI_OPCODE_INTERP_CENTROID]	= { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_SAMPLE]	= { ALU_OP0_NOP, tgsi_interp_egcm},
	[TGSI_OPCODE_INTERP_OFFSET]	= { ALU_OP0_NOP, tgsi_interp_egcm},
	/* FP64 support */
	[TGSI_OPCODE_F2D]	= { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
	[TGSI_OPCODE_D2F]	= { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DABS]	= { ALU_OP1_MOV, tgsi_op2_64},
	[TGSI_OPCODE_DNEG]	= { ALU_OP2_ADD_64, tgsi_dneg},
	[TGSI_OPCODE_DADD]	= { ALU_OP2_ADD_64, tgsi_op2_64},
	[TGSI_OPCODE_DMUL]	= { ALU_OP2_MUL_64, cayman_mul_double_instr},
	[TGSI_OPCODE_DMAX]	= { ALU_OP2_MAX_64, tgsi_op2_64},
	[TGSI_OPCODE_DMIN]	= { ALU_OP2_MIN_64, tgsi_op2_64},
	[TGSI_OPCODE_DSLT]	= { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
	[TGSI_OPCODE_DSGE]	= { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSEQ]	= { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DSNE]	= { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
	[TGSI_OPCODE_DRCP]	= { ALU_OP2_RECIP_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DSQRT]	= { ALU_OP2_SQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_DMAD]	= { ALU_OP3_FMA_64, tgsi_op3_64},
	[TGSI_OPCODE_DFRAC]	= { ALU_OP1_FRACT_64, tgsi_op2_64},
	[TGSI_OPCODE_DLDEXP]	= { ALU_OP2_LDEXP_64, tgsi_op2_64},
	[TGSI_OPCODE_DFRACEXP]	= { ALU_OP1_FREXP_64, tgsi_dfracexp},
	[TGSI_OPCODE_D2I]	= { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
	[TGSI_OPCODE_I2D]	= { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_D2U]	= { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
	[TGSI_OPCODE_U2D]	= { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
	[TGSI_OPCODE_DRSQ]	= { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
	[TGSI_OPCODE_LAST]	= { ALU_OP0_NOP, tgsi_unsupported},
};
8263
8264 static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
8265 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
8266 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
8267 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
8268 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
8269 [TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
8270 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
8271 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
8272 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
8273 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
8274 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
8275 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
8276 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
8277 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
8278 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
8279 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
8280 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
8281 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
8282 [TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
8283 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
8284 [TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
8285 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
8286 [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
8287 [22] = { ALU_OP0_NOP, tgsi_unsupported},
8288 [23] = { ALU_OP0_NOP, tgsi_unsupported},
8289 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
8290 [TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
8291 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
8292 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
8293 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
8294 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
8295 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
8296 [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
8297 [32] = { ALU_OP0_NOP, tgsi_unsupported},
8298 [TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
8299 [34] = { ALU_OP0_NOP, tgsi_unsupported},
8300 [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
8301 [TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
8302 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8303 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8304 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
8305 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
8306 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
8307 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
8308 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8309 [44] = { ALU_OP0_NOP, tgsi_unsupported},
8310 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
8311 [46] = { ALU_OP0_NOP, tgsi_unsupported},
8312 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
8313 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
8314 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
8315 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
8316 [51] = { ALU_OP0_NOP, tgsi_unsupported},
8317 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
8318 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
8319 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
8320 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
8321 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
8322 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
8323 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8324 [59] = { ALU_OP0_NOP, tgsi_unsupported},
8325 [60] = { ALU_OP0_NOP, tgsi_unsupported},
8326 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
8327 [62] = { ALU_OP0_NOP, tgsi_unsupported},
8328 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
8329 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
8330 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
8331 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
8332 [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
8333 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8334 [69] = { ALU_OP0_NOP, tgsi_unsupported},
8335 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
8336 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
8337 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8338 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
8339 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
8340 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
8341 [76] = { ALU_OP0_NOP, tgsi_unsupported},
8342 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
8343 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
8344 [TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8345 [TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8346 [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
8347 [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
8348 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
8349 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
8350 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
8351 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
8352 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
8353 [88] = { ALU_OP0_NOP, tgsi_unsupported},
8354 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
8355 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
8356 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
8357 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
8358 [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
8359 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
8360 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8361 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
8362 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
8363 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
8364 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
8365 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8366 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
8367 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8368 [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8369 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
8370 [105] = { ALU_OP0_NOP, tgsi_unsupported},
8371 [106] = { ALU_OP0_NOP, tgsi_unsupported},
8372 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
8373 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
8374 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
8375 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
8376 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
8377 [112] = { ALU_OP0_NOP, tgsi_unsupported},
8378 [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
8379 [114] = { ALU_OP0_NOP, tgsi_unsupported},
8380 [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
8381 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
8382 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
8383 [118] = { ALU_OP0_NOP, tgsi_unsupported},
8384 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
8385 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
8386 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
8387 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
8388 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
8389 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
8390 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
8391 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
8392 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
8393 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
8394 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
8395 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
8396 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
8397 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
8398 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
8399 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
8400 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
8401 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
8402 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
8403 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
8404 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
8405 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
8406 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8407 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
8408 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
8409 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8410 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
8411 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
8412 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
8413 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
8414 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
8415 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
8416 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
8417 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
8418 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
8419 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
8420 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
8421 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
8422 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
8423 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
8424 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
8425 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
8426 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
8427 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
8428 [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8429 [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8430 [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8431 [TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
8432 [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
8433 [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
8434 [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
8435 [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
8436 [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
8437 [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
8438 [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8439 [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8440 [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8441 [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8442 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
8443 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8444 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8445 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
8446 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
8447 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
8448 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
8449 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
8450 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
8451 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
8452 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
8453 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
8454 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
8455 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
8456 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
8457 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
8458 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
8459 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
8460 [TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
8461 [TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
8462 [TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
8463 [TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
8464 [TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
8465 [TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
8466 [TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
8467 [TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
8468 [TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
8469 [TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
8470 [TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
8471 [TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
8472 [TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
8473 [TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
8474 [TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
8475 [TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
8476 [TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
8477 [TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
8478 [TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
8479 [TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
8480 [TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
8481 [TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
8482 [TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
8483 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
8484 };