/* mesa: src/gallium/drivers/r600/r600_shader.c */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_sq.h"
#include "r600_llvm.h"
#include "r600_formats.h"
#include "r600_opcodes.h"
#include "r600_shader.h"
#include "r600d.h"

#include "sb/sb_public.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_dump.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include <stdio.h>
#include <errno.h>

/* CAYMAN notes
Why CAYMAN got loops for lots of instructions is explained here.

-These 8xx t-slot only ops are implemented in all vector slots.
MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
These 8xx t-slot only opcodes become vector ops, with all four
slots expecting the arguments on sources a and b. Result is
broadcast to all channels.
MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
These 8xx t-slot only opcodes become vector ops in the z, y, and
x slots.
EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
SQRT_IEEE/_64
SIN/COS
The w slot may have an independent co-issued operation, or if the
result is required to be in the w slot, the opcode above may be
issued in the w slot as well.
The compiler must issue the source argument to slots z, y, and x.
*/

#define R600_SHADER_BUFFER_INFO_SEL (512 + R600_BUFFER_INFO_OFFSET / 16)
static int r600_shader_from_tgsi(struct r600_context *rctx,
				 struct r600_pipe_shader *pipeshader,
				 union r600_shader_key key);


static void r600_add_gpr_array(struct r600_shader *ps, int start_gpr,
			       int size, unsigned comp_mask) {

	if (!size)
		return;

	if (ps->num_arrays == ps->max_arrays) {
		ps->max_arrays += 64;
		ps->arrays = realloc(ps->arrays, ps->max_arrays *
				     sizeof(struct r600_shader_array));
	}

	int n = ps->num_arrays;
	++ps->num_arrays;

	ps->arrays[n].comp_mask = comp_mask;
	ps->arrays[n].gpr_start = start_gpr;
	ps->arrays[n].gpr_count = size;
}

static void r600_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	fprintf(stderr, "STREAMOUT\n");
	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, "  %i: MEM_STREAM%d_BUF%i[%i..%i] <- OUT[%i].%s%s%s%s%s\n",
			i,
			so->output[i].stream,
			so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "",
			so->output[i].dst_offset < so->output[i].start_component ? " (will lower)" : "");
	}
}

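/* Upload the final bytecode into a buffer object. The bytecode words are
 * stored little-endian; on big-endian hosts each 32-bit word is swapped with
 * util_cpu_to_le32() so the GPU always reads LE data. */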
static int store_shader(struct pipe_context *ctx,
			struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	uint32_t *ptr, i;

	if (shader->bo == NULL) {
		shader->bo = (struct r600_resource*)
			pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
		if (shader->bo == NULL) {
			return -ENOMEM;
		}
		ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
		if (R600_BIG_ENDIAN) {
			for (i = 0; i < shader->shader.bc.ndw; ++i) {
				ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
			}
		} else {
			memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
		}
		rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
	}

	return 0;
}

int r600_pipe_shader_create(struct pipe_context *ctx,
			    struct r600_pipe_shader *shader,
			    union r600_shader_key key)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = shader->selector;
	int r;
	bool dump = r600_can_dump_shader(&rctx->screen->b, sel->tokens);
	unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
	unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
	unsigned export_shader;

	shader->shader.bc.isa = rctx->isa;

	if (dump) {
		fprintf(stderr, "--------------------------------------------------------------\n");
		tgsi_dump(sel->tokens, 0);

		if (sel->so.num_outputs) {
			r600_dump_streamout(&sel->so);
		}
	}
	r = r600_shader_from_tgsi(rctx, shader, key);
	if (r) {
		R600_ERR("translation from TGSI failed!\n");
		goto error;
	}

	/* disable SB for geometry shaders on R6xx/R7xx due to some mysterious
	 * gs piglit regressions with it enabled. */
	if (rctx->b.chip_class <= R700) {
		use_sb &= (shader->shader.processor_type != TGSI_PROCESSOR_GEOMETRY);
	}
	/* disable SB for shaders using doubles */
	use_sb &= !shader->shader.uses_doubles;

	/* Check if the bytecode has already been built. When using the llvm
	 * backend, r600_shader_from_tgsi() will take care of building the
	 * bytecode.
	 */
	if (!shader->shader.bc.bytecode) {
		r = r600_bytecode_build(&shader->shader.bc);
		if (r) {
			R600_ERR("building bytecode failed!\n");
			goto error;
		}
	}

	if (dump && !sb_disasm) {
		fprintf(stderr, "--------------------------------------------------------------\n");
		r600_bytecode_disasm(&shader->shader.bc);
		fprintf(stderr, "______________________________________________________________\n");
	} else if ((dump && sb_disasm) || use_sb) {
		r = r600_sb_bytecode_process(rctx, &shader->shader.bc, &shader->shader,
					     dump, use_sb);
		if (r) {
			R600_ERR("r600_sb_bytecode_process failed!\n");
			goto error;
		}
	}

	if (shader->gs_copy_shader) {
		if (dump) {
			/* dump the copy shader */
			r = r600_sb_bytecode_process(rctx, &shader->gs_copy_shader->shader.bc,
						     &shader->gs_copy_shader->shader, dump, 0);
			if (r)
				goto error;
		}

		if ((r = store_shader(ctx, shader->gs_copy_shader)))
			goto error;
	}

	/* Store the shader in a buffer. */
	if ((r = store_shader(ctx, shader)))
		goto error;

	/* Build state. */
	switch (shader->shader.processor_type) {
	case TGSI_PROCESSOR_GEOMETRY:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_gs_state(ctx, shader);
			evergreen_update_vs_state(ctx, shader->gs_copy_shader);
		} else {
			r600_update_gs_state(ctx, shader);
			r600_update_vs_state(ctx, shader->gs_copy_shader);
		}
		break;
	case TGSI_PROCESSOR_VERTEX:
		export_shader = key.vs.as_es;
		if (rctx->b.chip_class >= EVERGREEN) {
			if (export_shader)
				evergreen_update_es_state(ctx, shader);
			else
				evergreen_update_vs_state(ctx, shader);
		} else {
			if (export_shader)
				r600_update_es_state(ctx, shader);
			else
				r600_update_vs_state(ctx, shader);
		}
		break;
	case TGSI_PROCESSOR_FRAGMENT:
		if (rctx->b.chip_class >= EVERGREEN) {
			evergreen_update_ps_state(ctx, shader);
		} else {
			r600_update_ps_state(ctx, shader);
		}
		break;
	default:
		r = -EINVAL;
		goto error;
	}
	return 0;

error:
	r600_pipe_shader_destroy(ctx, shader);
	return r;
}

void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
	r600_bytecode_clear(&shader->shader.bc);
	r600_release_command_buffer(&shader->command_buffer);
}

/*
 * tgsi -> r600 shader
 */
struct r600_shader_tgsi_instruction;

struct r600_shader_src {
	unsigned sel;
	unsigned swizzle[4];
	unsigned neg;
	unsigned abs;
	unsigned rel;
	unsigned kc_bank;
	boolean kc_rel; /* true if cache bank is indexed */
	uint32_t value[4];
};

struct eg_interp {
	boolean enabled;
	unsigned ij_index;
};

struct r600_shader_ctx {
	struct tgsi_shader_info info;
	struct tgsi_parse_context parse;
	const struct tgsi_token *tokens;
	unsigned type;
	unsigned file_offset[TGSI_FILE_COUNT];
	unsigned temp_reg;
	const struct r600_shader_tgsi_instruction *inst_info;
	struct r600_bytecode *bc;
	struct r600_shader *shader;
	struct r600_shader_src src[4];
	uint32_t *literals;
	uint32_t nliterals;
	uint32_t max_driver_temp_used;
	boolean use_llvm;
	/* needed for evergreen interpolation */
	struct eg_interp eg_interpolators[6]; /* indexed by Persp/Linear * 3 + sample/center/centroid */
	/* evergreen/cayman also store sample mask in face register */
	int face_gpr;
	/* sample id is .w component stored in fixed point position register */
	int fixed_pt_position_gpr;
	int colors_used;
	boolean clip_vertex_write;
	unsigned cv_output;
	unsigned edgeflag_output;
	int fragcoord_input;
	int native_integers;
	int next_ring_offset;
	int gs_out_ring_offset;
	int gs_next_vertex;
	struct r600_shader *gs_for_vs;
	int gs_export_gpr_tregs[4];
	const struct pipe_stream_output_info *gs_stream_output_info;
	unsigned enabled_stream_buffers_mask;
};

struct r600_shader_tgsi_instruction {
	unsigned op;
	int (*process)(struct r600_shader_ctx *ctx);
};

static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
static int tgsi_else(struct r600_shader_ctx *ctx);
static int tgsi_endif(struct r600_shader_ctx *ctx);
static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
static int tgsi_endloop(struct r600_shader_ctx *ctx);
static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
				unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
				unsigned int dst_reg);
static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
			      const struct r600_shader_src *shader_src,
			      unsigned chan);

static int tgsi_is_supported(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *i = &ctx->parse.FullToken.FullInstruction;
	int j;

	if (i->Instruction.NumDstRegs > 1 && i->Instruction.Opcode != TGSI_OPCODE_DFRACEXP) {
		R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs);
		return -EINVAL;
	}
	if (i->Instruction.Predicate) {
		R600_ERR("predicate unsupported\n");
		return -EINVAL;
	}
#if 0
	if (i->Instruction.Label) {
		R600_ERR("label unsupported\n");
		return -EINVAL;
	}
#endif
	for (j = 0; j < i->Instruction.NumSrcRegs; j++) {
		if (i->Src[j].Register.Dimension) {
			switch (i->Src[j].Register.File) {
			case TGSI_FILE_CONSTANT:
				break;
			case TGSI_FILE_INPUT:
				if (ctx->type == TGSI_PROCESSOR_GEOMETRY)
					break;
				/* non-GS dimensioned inputs fall through to the error */
			default:
				R600_ERR("unsupported src %d (dimension %d)\n", j,
					 i->Src[j].Register.Dimension);
				return -EINVAL;
			}
		}
	}
	for (j = 0; j < i->Instruction.NumDstRegs; j++) {
		if (i->Dst[j].Register.Dimension) {
			R600_ERR("unsupported dst (dimension)\n");
			return -EINVAL;
		}
	}
	return 0;
}

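/* Map a TGSI (interpolate mode, location) pair onto a slot in
 * ctx->eg_interpolators: is_linear * 3 + (sample = 0, center = 1,
 * centroid = 2). COLOR and PERSPECTIVE share the perspective barycentrics;
 * anything else (e.g. CONSTANT) returns -1 since it needs no ij values. */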
int eg_get_interpolator_index(unsigned interpolate, unsigned location)
{
	if (interpolate == TGSI_INTERPOLATE_COLOR ||
	    interpolate == TGSI_INTERPOLATE_LINEAR ||
	    interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
	{
		int is_linear = interpolate == TGSI_INTERPOLATE_LINEAR;
		int loc;

		switch (location) {
		case TGSI_INTERPOLATE_LOC_CENTER:
			loc = 1;
			break;
		case TGSI_INTERPOLATE_LOC_CENTROID:
			loc = 2;
			break;
		case TGSI_INTERPOLATE_LOC_SAMPLE:
		default:
			loc = 0;
			break;
		}

		return is_linear * 3 + loc;
	}

	return -1;
}

static void evergreen_interp_assign_ij_index(struct r600_shader_ctx *ctx,
					     int input)
{
	int i = eg_get_interpolator_index(
		ctx->shader->input[input].interpolate,
		ctx->shader->input[input].interpolate_location);
	assert(i >= 0);
	ctx->shader->input[input].ij_index = ctx->eg_interpolators[i].ij_index;
}

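/* Emit the interpolation ALU ops for one input. Two ij barycentric pairs are
 * packed per GPR, so the source gpr/base_chan are derived from ij_index.
 * INTERP_ZW is issued before INTERP_XY; each occupies a full 4-slot group,
 * hence 8 ALU ops per attribute, of which only the middle four (i = 2..5)
 * actually write the destination GPR. */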
static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input)
{
	int i, r;
	struct r600_bytecode_alu alu;
	int gpr = 0, base_chan = 0;
	int ij_index = ctx->shader->input[input].ij_index;

	/* work out gpr and base_chan from index */
	gpr = ij_index / 2;
	base_chan = (2 * (ij_index % 2)) + 1;

	for (i = 0; i < 8; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		if (i < 4)
			alu.op = ALU_OP2_INTERP_ZW;
		else
			alu.op = ALU_OP2_INTERP_XY;

		if ((i > 1) && (i < 6)) {
			alu.dst.sel = ctx->shader->input[input].gpr;
			alu.dst.write = 1;
		}

		alu.dst.chan = i % 4;

		alu.src[0].sel = gpr;
		alu.src[0].chan = (base_chan - (i % 2));

		alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;

		alu.bank_swizzle_force = SQ_ALU_VEC_210;
		if ((i % 4) == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input)
{
	int i, r;
	struct r600_bytecode_alu alu;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));

		alu.op = ALU_OP1_INTERP_LOAD_P0;

		alu.dst.sel = ctx->shader->input[input].gpr;
		alu.dst.write = 1;

		alu.dst.chan = i;

		alu.src[0].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
		alu.src[0].chan = i;

		if (i == 3)
			alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
	}
	return 0;
}

/*
 * Special export handling in shaders
 *
 * shader export ARRAY_BASE for EXPORT_POS:
 * 60 is position
 * 61 is misc vector
 * 62, 63 are clip distance vectors
 *
 * The use of the values exported in 61-63 is controlled by PA_CL_VS_OUT_CNTL:
 * VS_OUT_MISC_VEC_ENA - enables the use of all fields in export 61
 * USE_VTX_POINT_SIZE - point size in the X channel of export 61
 * USE_VTX_EDGE_FLAG - edge flag in the Y channel of export 61
 * USE_VTX_RENDER_TARGET_INDX - render target index in the Z channel of export 61
 * USE_VTX_VIEWPORT_INDX - viewport index in the W channel of export 61
 * USE_VTX_KILL_FLAG - kill flag in the Z channel of export 61 (mutually
 * exclusive with the render target index)
 * VS_OUT_CCDIST0_VEC_ENA/VS_OUT_CCDIST1_VEC_ENA - enable clip distance vectors
 *
 *
 * shader export ARRAY_BASE for EXPORT_PIXEL:
 * 0-7 CB targets
 * 61 computed Z vector
 *
 * The use of the values exported in the computed Z vector is controlled
 * by DB_SHADER_CONTROL:
 * Z_EXPORT_ENABLE - Z as a float in RED
 * STENCIL_REF_EXPORT_ENABLE - stencil ref as int in GREEN
 * COVERAGE_TO_MASK_ENABLE - alpha to mask in ALPHA
 * MASK_EXPORT_ENABLE - pixel sample mask in BLUE
 * DB_SOURCE_FORMAT - export control restrictions
 *
 */


/* Map name/sid pair from tgsi to the 8-bit semantic index for SPI setup */
static int r600_spi_sid(struct r600_shader_io *io)
{
	int index, name = io->name;

	/* These params are handled differently, they don't need
	 * semantic indices, so we'll use 0 for them.
	 */
	if (name == TGSI_SEMANTIC_POSITION ||
	    name == TGSI_SEMANTIC_PSIZE ||
	    name == TGSI_SEMANTIC_EDGEFLAG ||
	    name == TGSI_SEMANTIC_FACE ||
	    name == TGSI_SEMANTIC_SAMPLEMASK)
		index = 0;
	else {
		if (name == TGSI_SEMANTIC_GENERIC) {
			/* For generic params simply use sid from tgsi */
			index = io->sid;
		} else {
			/* For non-generic params - pack name and sid into 8 bits */
			index = 0x80 | (name << 3) | (io->sid);
		}

		/* Make sure that all really used indices have nonzero value, so
		 * we can just compare it to 0 later instead of comparing the name
		 * with different values to detect special cases. */
		index++;
	}

	return index;
}

/* turn input into interpolate on EG */
static int evergreen_interp_input(struct r600_shader_ctx *ctx, int index)
{
	int r = 0;

	if (ctx->shader->input[index].spi_sid) {
		ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
		if (ctx->shader->input[index].interpolate > 0) {
			evergreen_interp_assign_ij_index(ctx, index);
			if (!ctx->use_llvm)
				r = evergreen_interp_alu(ctx, index);
		} else {
			if (!ctx->use_llvm)
				r = evergreen_interp_flat(ctx, index);
		}
	}
	return r;
}

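/* Per channel: dst = face > 0 ? front : back, implemented with CNDGT on the
 * face GPR; the result overwrites the front color's GPR. */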
static int select_twoside_color(struct r600_shader_ctx *ctx, int front, int back)
{
	struct r600_bytecode_alu alu;
	int i, r;
	int gpr_front = ctx->shader->input[front].gpr;
	int gpr_back = ctx->shader->input[back].gpr;

	for (i = 0; i < 4; i++) {
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP3_CNDGT;
		alu.is_op3 = 1;
		alu.dst.write = 1;
		alu.dst.sel = gpr_front;
		alu.src[0].sel = ctx->face_gpr;
		alu.src[1].sel = gpr_front;
		alu.src[2].sel = gpr_back;

		alu.dst.chan = i;
		alu.src[1].chan = i;
		alu.src[2].chan = i;
		alu.last = (i == 3);

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;
	}

	return 0;
}

static int vs_add_primid_output(struct r600_shader_ctx *ctx, int prim_id_sid)
{
	int i;
	i = ctx->shader->noutput++;
	ctx->shader->output[i].name = TGSI_SEMANTIC_PRIMID;
	ctx->shader->output[i].sid = 0;
	ctx->shader->output[i].gpr = 0;
	ctx->shader->output[i].interpolate = TGSI_INTERPOLATE_CONSTANT;
	ctx->shader->output[i].write_mask = 0x4;
	ctx->shader->output[i].spi_sid = prim_id_sid;

	return 0;
}

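/* Translate one TGSI declaration: inputs and outputs are recorded in the
 * r600_shader tables (with SPI semantic indices from r600_spi_sid()),
 * indirectly addressed temporaries become GPR arrays, and a few system
 * values are handled or converted up front. */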
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
	int r, i, j, count = d->Range.Last - d->Range.First + 1;

	switch (d->Declaration.File) {
	case TGSI_FILE_INPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->ninput + j;
			assert(i < Elements(ctx->shader->input));
			ctx->shader->input[i].name = d->Semantic.Name;
			ctx->shader->input[i].sid = d->Semantic.Index + j;
			ctx->shader->input[i].interpolate = d->Interp.Interpolate;
			ctx->shader->input[i].interpolate_location = d->Interp.Location;
			ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First + j;
			if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]);
				switch (ctx->shader->input[i].name) {
				case TGSI_SEMANTIC_FACE:
					if (ctx->face_gpr != -1)
						ctx->shader->input[i].gpr = ctx->face_gpr; /* already allocated by allocate_system_value_inputs */
					else
						ctx->face_gpr = ctx->shader->input[i].gpr;
					break;
				case TGSI_SEMANTIC_COLOR:
					ctx->colors_used++;
					break;
				case TGSI_SEMANTIC_POSITION:
					ctx->fragcoord_input = i;
					break;
				case TGSI_SEMANTIC_PRIMID:
					/* set this for now */
					ctx->shader->gs_prim_id_input = true;
					ctx->shader->ps_prim_id_input = i;
					break;
				}
				if (ctx->bc->chip_class >= EVERGREEN) {
					if ((r = evergreen_interp_input(ctx, i)))
						return r;
				}
			} else if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
				/* FIXME probably skip inputs if they aren't passed in the ring */
				ctx->shader->input[i].ring_offset = ctx->next_ring_offset;
				ctx->next_ring_offset += 16;
				if (ctx->shader->input[i].name == TGSI_SEMANTIC_PRIMID)
					ctx->shader->gs_prim_id_input = true;
			}
		}
		ctx->shader->ninput += count;
		break;
	case TGSI_FILE_OUTPUT:
		for (j = 0; j < count; j++) {
			i = ctx->shader->noutput + j;
			assert(i < Elements(ctx->shader->output));
			ctx->shader->output[i].name = d->Semantic.Name;
			ctx->shader->output[i].sid = d->Semantic.Index + j;
			ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First + j;
			ctx->shader->output[i].interpolate = d->Interp.Interpolate;
			ctx->shader->output[i].write_mask = d->Declaration.UsageMask;
			if (ctx->type == TGSI_PROCESSOR_VERTEX ||
			    ctx->type == TGSI_PROCESSOR_GEOMETRY) {
				ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_CLIPDIST:
					ctx->shader->clip_dist_write |= d->Declaration.UsageMask <<
									((d->Semantic.Index + j) << 2);
					break;
				case TGSI_SEMANTIC_PSIZE:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_point_size = 1;
					break;
				case TGSI_SEMANTIC_EDGEFLAG:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_edgeflag = 1;
					ctx->edgeflag_output = i;
					break;
				case TGSI_SEMANTIC_VIEWPORT_INDEX:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_viewport = 1;
					break;
				case TGSI_SEMANTIC_LAYER:
					ctx->shader->vs_out_misc_write = 1;
					ctx->shader->vs_out_layer = 1;
					break;
				case TGSI_SEMANTIC_CLIPVERTEX:
					ctx->clip_vertex_write = TRUE;
					ctx->cv_output = i;
					break;
				}
				if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
					ctx->gs_out_ring_offset += 16;
				}
			} else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				switch (d->Semantic.Name) {
				case TGSI_SEMANTIC_COLOR:
					ctx->shader->nr_ps_max_color_exports++;
					break;
				}
			}
		}
		ctx->shader->noutput += count;
		break;
	case TGSI_FILE_TEMPORARY:
		if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
			if (d->Array.ArrayID) {
				r600_add_gpr_array(ctx->shader,
						   ctx->file_offset[TGSI_FILE_TEMPORARY] +
						   d->Range.First,
						   d->Range.Last - d->Range.First + 1, 0x0F);
			}
		}
		break;

	case TGSI_FILE_CONSTANT:
	case TGSI_FILE_SAMPLER:
	case TGSI_FILE_SAMPLER_VIEW:
	case TGSI_FILE_ADDRESS:
		break;

	case TGSI_FILE_SYSTEM_VALUE:
		if (d->Semantic.Name == TGSI_SEMANTIC_SAMPLEMASK ||
		    d->Semantic.Name == TGSI_SEMANTIC_SAMPLEID ||
		    d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
			break; /* Already handled from allocate_system_value_inputs */
		} else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
			if (!ctx->native_integers) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));

				alu.op = ALU_OP1_INT_TO_FLT;
				alu.src[0].sel = 0;
				alu.src[0].chan = 3;

				alu.dst.sel = 0;
				alu.dst.chan = 3;
				alu.dst.write = 1;
				alu.last = 1;

				if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
					return r;
			}
			break;
		} else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
			break;
		else if (d->Semantic.Name == TGSI_SEMANTIC_INVOCATIONID)
			break;
		/* other system values fall through to the error below */
	default:
		R600_ERR("unsupported file %d declaration\n", d->Declaration.File);
		return -EINVAL;
	}
	return 0;
}

static int r600_get_temp(struct r600_shader_ctx *ctx)
{
	return ctx->temp_reg + ctx->max_driver_temp_used++;
}

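/* Scan the shader for sample-mask/sample-id/sample-pos usage (including the
 * interpolateAt* opcodes that imply them) and reserve the fixed GPRs that
 * hold those system values, starting at gpr_offset. Returns the first free
 * GPR after the reserved ones. */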
static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_offset)
{
	struct tgsi_parse_context parse;
	struct {
		boolean enabled;
		int *reg;
		unsigned name, alternate_name;
	} inputs[2] = {
		{ false, &ctx->face_gpr, TGSI_SEMANTIC_SAMPLEMASK, ~0u }, /* lives in Front Face GPR.z */

		{ false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
	};
	int i, k, num_regs = 0;

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					inputs[1].enabled = true; /* needs SAMPLEID */
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
					/* Needs sample positions, currently those are always available */
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				ctx->eg_interpolators[k].enabled = true;
			}
		} else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
			struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
			if (d->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
				for (k = 0; k < Elements(inputs); k++) {
					if (d->Semantic.Name == inputs[k].name ||
					    d->Semantic.Name == inputs[k].alternate_name) {
						inputs[k].enabled = true;
					}
				}
			}
		}
	}

	tgsi_parse_free(&parse);

	for (i = 0; i < Elements(inputs); i++) {
		boolean enabled = inputs[i].enabled;
		int *reg = inputs[i].reg;
		unsigned name = inputs[i].name;

		if (enabled) {
			int gpr = gpr_offset + num_regs++;

			/* add to inputs, allocate a gpr */
			k = ctx->shader->ninput++;
			ctx->shader->input[k].name = name;
			ctx->shader->input[k].sid = 0;
			ctx->shader->input[k].interpolate = TGSI_INTERPOLATE_CONSTANT;
			ctx->shader->input[k].interpolate_location = TGSI_INTERPOLATE_LOC_CENTER;
			*reg = ctx->shader->input[k].gpr = gpr;
		}
	}

	return gpr_offset + num_regs;
}

/*
 * for evergreen we need to scan the shader to find the number of GPRs we need to
 * reserve for interpolation and system values
 *
 * we need to know if we are going to emit
 * any sample or centroid inputs
 * if perspective and linear are required
 */
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
	int i;
	int num_baryc;
	struct tgsi_parse_context parse;

	memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));

	for (i = 0; i < ctx->info.num_inputs; i++) {
		int k;
		/* skip position/face/mask/sampleid */
		if (ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_FACE ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEMASK ||
		    ctx->info.input_semantic_name[i] == TGSI_SEMANTIC_SAMPLEID)
			continue;

		k = eg_get_interpolator_index(
			ctx->info.input_interpolate[i],
			ctx->info.input_interpolate_loc[i]);
		if (k >= 0)
			ctx->eg_interpolators[k].enabled = TRUE;
	}

	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
		return 0;
	}

	/* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
			const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
			    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
			{
				int interpolate, location, k;

				if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
					location = TGSI_INTERPOLATE_LOC_CENTER;
				} else {
					location = TGSI_INTERPOLATE_LOC_CENTROID;
				}

				interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
				k = eg_get_interpolator_index(interpolate, location);
				ctx->eg_interpolators[k].enabled = true;
			}
		}
	}

	tgsi_parse_free(&parse);

	/* assign gpr to each interpolator according to priority */
	num_baryc = 0;
	for (i = 0; i < Elements(ctx->eg_interpolators); i++) {
		if (ctx->eg_interpolators[i].enabled) {
			ctx->eg_interpolators[i].ij_index = num_baryc;
			num_baryc++;
		}
	}

	/* XXX PULL MODEL and LINE STIPPLE */

	/* two ij pairs fit into one GPR, so round up */
	num_baryc = (num_baryc + 1) >> 1;
	return allocate_system_value_inputs(ctx, num_baryc);
}

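/* Fetch one sample's position from the R600_BUFFER_INFO constant buffer;
 * the first element of that buffer holds its size, so the fetch starts at
 * offset 1. Returns the temp GPR that receives the xyzw position data. */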
/* sample_id == NULL means fetch for current sample */
static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_src *sample_id, int chan_sel)
{
	struct r600_bytecode_vtx vtx;
	int r, t1;

	assert(ctx->fixed_pt_position_gpr != -1);

	t1 = r600_get_temp(ctx);

	memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
	vtx.op = FETCH_OP_VFETCH;
	vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	if (sample_id == NULL) {
		vtx.src_gpr = ctx->fixed_pt_position_gpr; /* SAMPLEID is in .w */
		vtx.src_sel_x = 3;
	} else {
		struct r600_bytecode_alu alu;

		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		r600_bytecode_src(&alu.src[0], sample_id, chan_sel);
		alu.dst.sel = t1;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;

		vtx.src_gpr = t1;
		vtx.src_sel_x = 0;
	}
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = t1;
	vtx.dst_sel_x = 0;
	vtx.dst_sel_y = 1;
	vtx.dst_sel_z = 2;
	vtx.dst_sel_w = 3;
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2;
	vtx.format_comp_all = 1;
	vtx.use_const_fields = 0;
	vtx.offset = 1; /* first element is size of buffer */
	vtx.endian = r600_endian_swap(32);
	vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */

	r = r600_bytecode_add_vtx(ctx->bc, &vtx);
	if (r)
		return r;

	return t1;
}

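/* Decode a TGSI source operand into a r600_shader_src: swizzles and
 * modifiers are copied, immediates become inline literals, and system
 * values are remapped to the GPRs/channels set up at shader start. */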
static void tgsi_src(struct r600_shader_ctx *ctx,
		     const struct tgsi_full_src_register *tgsi_src,
		     struct r600_shader_src *r600_src)
{
	memset(r600_src, 0, sizeof(*r600_src));
	r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
	r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
	r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
	r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
	r600_src->neg = tgsi_src->Register.Negate;
	r600_src->abs = tgsi_src->Register.Absolute;

	if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
		int index;
		if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
		    (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
		    (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {

			index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
			r600_bytecode_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
			if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
				return;
		}
		index = tgsi_src->Register.Index;
		r600_src->sel = V_SQ_ALU_SRC_LITERAL;
		memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
	} else if (tgsi_src->Register.File == TGSI_FILE_SYSTEM_VALUE) {
		if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEMASK) {
			r600_src->swizzle[0] = 2; /* Z value */
			r600_src->swizzle[1] = 2;
			r600_src->swizzle[2] = 2;
			r600_src->swizzle[3] = 2;
			r600_src->sel = ctx->face_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEID) {
			r600_src->swizzle[0] = 3; /* W value */
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = ctx->fixed_pt_position_gpr;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_SAMPLEPOS) {
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 1;
			r600_src->swizzle[2] = 4;
			r600_src->swizzle[3] = 4;
			r600_src->sel = load_sample_position(ctx, NULL, -1);
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INSTANCEID) {
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTEXID) {
			r600_src->swizzle[0] = 0;
			r600_src->swizzle[1] = 0;
			r600_src->swizzle[2] = 0;
			r600_src->swizzle[3] = 0;
			r600_src->sel = 0;
		} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
			r600_src->swizzle[0] = 3;
			r600_src->swizzle[1] = 3;
			r600_src->swizzle[2] = 3;
			r600_src->swizzle[3] = 3;
			r600_src->sel = 1;
		}
	} else {
		if (tgsi_src->Register.Indirect)
			r600_src->rel = V_SQ_REL_RELATIVE;
		r600_src->sel = tgsi_src->Register.Index;
		r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
	}
	if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
		if (tgsi_src->Register.Dimension) {
			r600_src->kc_bank = tgsi_src->Dimension.Index;
			if (tgsi_src->Dimension.Indirect) {
				r600_src->kc_rel = 1;
			}
		}
	}
}

static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
				unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
				unsigned int dst_reg)
{
	struct r600_bytecode_vtx vtx;
	unsigned int ar_reg;
	int r;

	if (offset) {
		struct r600_bytecode_alu alu;

		memset(&alu, 0, sizeof(alu));

		alu.op = ALU_OP2_ADD_INT;
		alu.src[0].sel = ctx->bc->ar_reg;
		alu.src[0].chan = ar_chan;

		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = offset;

		alu.dst.sel = dst_reg;
		alu.dst.chan = ar_chan;
		alu.dst.write = 1;
		alu.last = 1;

		if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
			return r;

		ar_reg = dst_reg;
	} else {
		ar_reg = ctx->bc->ar_reg;
	}

	memset(&vtx, 0, sizeof(vtx));
	vtx.buffer_id = cb_idx;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	vtx.src_gpr = ar_reg;
	vtx.src_sel_x = ar_chan;
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = dst_reg;
	vtx.dst_sel_x = 0;		/* SEL_X */
	vtx.dst_sel_y = 1;		/* SEL_Y */
	vtx.dst_sel_z = 2;		/* SEL_Z */
	vtx.dst_sel_w = 3;		/* SEL_W */
	vtx.data_format = FMT_32_32_32_32_FLOAT;
	vtx.num_format_all = 2;		/* NUM_FORMAT_SCALED */
	vtx.format_comp_all = 1;	/* FORMAT_COMP_SIGNED */
	vtx.endian = r600_endian_swap(32);
	vtx.buffer_index_mode = cb_rel; /* cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE */

	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
		return r;

	return 0;
}

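/* Fetch one attribute of a GS input vertex from the ESGS ring. The ring
 * offsets of the (up to six) input vertices arrive in R0/R1 as described
 * below; for an indirect vertex index the offsets are first spilled into an
 * indexable GPR array. */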
static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
{
	struct r600_bytecode_vtx vtx;
	int r;
	unsigned index = src->Register.Index;
	unsigned vtx_id = src->Dimension.Index;
	int offset_reg = vtx_id / 3;
	int offset_chan = vtx_id % 3;

	/* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
	 * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */

	if (offset_reg == 0 && offset_chan == 2)
		offset_chan = 3;

	if (src->Dimension.Indirect) {
		int treg[3];
		int t2;
		struct r600_bytecode_alu alu;
		int r, i;

		/* you have got to be shitting me -
		 * we have to put the R0.x/y/w into Rt.x Rt+1.x Rt+2.x then index reg from Rt.
		 * at least this is what fglrx seems to do. */
		for (i = 0; i < 3; i++) {
			treg[i] = r600_get_temp(ctx);
		}
		r600_add_gpr_array(ctx->shader, treg[0], 3, 0x0F);

		t2 = r600_get_temp(ctx);
		for (i = 0; i < 3; i++) {
			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
			alu.op = ALU_OP1_MOV;
			alu.src[0].sel = 0;
			alu.src[0].chan = i == 2 ? 3 : i;
			alu.dst.sel = treg[i];
			alu.dst.chan = 0;
			alu.dst.write = 1;
			alu.last = 1;
			r = r600_bytecode_add_alu(ctx->bc, &alu);
			if (r)
				return r;
		}
		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
		alu.op = ALU_OP1_MOV;
		alu.src[0].sel = treg[0];
		alu.src[0].rel = 1;
		alu.dst.sel = t2;
		alu.dst.write = 1;
		alu.last = 1;
		r = r600_bytecode_add_alu(ctx->bc, &alu);
		if (r)
			return r;
		offset_reg = t2;
	}

	memset(&vtx, 0, sizeof(vtx));
	vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
	vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
	vtx.src_gpr = offset_reg;
	vtx.src_sel_x = offset_chan;
	vtx.offset = index * 16; /* bytes */
	vtx.mega_fetch_count = 16;
	vtx.dst_gpr = dst_reg;
	vtx.dst_sel_x = 0;		/* SEL_X */
	vtx.dst_sel_y = 1;		/* SEL_Y */
	vtx.dst_sel_z = 2;		/* SEL_Z */
	vtx.dst_sel_w = 3;		/* SEL_W */
	if (ctx->bc->chip_class >= EVERGREEN) {
		vtx.use_const_fields = 1;
	} else {
		vtx.data_format = FMT_32_32_32_32_FLOAT;
	}

	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
		return r;

	return 0;
}

static int tgsi_split_gs_inputs(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	int i;

	for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
		struct tgsi_full_src_register *src = &inst->Src[i];

		if (src->Register.File == TGSI_FILE_INPUT) {
			if (ctx->shader->input[src->Register.Index].name == TGSI_SEMANTIC_PRIMID) {
				/* primitive id is in R0.z */
				ctx->src[i].sel = 0;
				ctx->src[i].swizzle[0] = 2;
			}
		}
		if (src->Register.File == TGSI_FILE_INPUT && src->Register.Dimension) {
			int treg = r600_get_temp(ctx);

			fetch_gs_input(ctx, src, treg);
			ctx->src[i].sel = treg;
		}
	}
	return 0;
}

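/* An ALU instruction can only read a limited number of distinct constant-file
 * operands, so copy all constant sources but one into temporaries first;
 * relatively-addressed constants are always fetched into a temp via
 * tgsi_fetch_rel_const(). */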
static int tgsi_split_constant(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, k, nconst, r;

	for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
			nconst++;
		}
		tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
	}
	for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
			continue;
		}

		if (ctx->src[i].rel) {
			int chan = inst->Src[i].Indirect.Swizzle;
			int treg = r600_get_temp(ctx);
			if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
				return r;

			ctx->src[i].kc_bank = 0;
			ctx->src[i].kc_rel = 0;
			ctx->src[i].sel = treg;
			ctx->src[i].rel = 0;
			j--;
		} else if (j > 0) {
			int treg = r600_get_temp(ctx);
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = ctx->src[i].sel;
				alu.src[0].chan = k;
				alu.src[0].rel = ctx->src[i].rel;
				alu.src[0].kc_bank = ctx->src[i].kc_bank;
				alu.src[0].kc_rel = ctx->src[i].kc_rel;
				alu.dst.sel = treg;
				alu.dst.chan = k;
				alu.dst.write = 1;
				if (k == 3)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			ctx->src[i].sel = treg;
			ctx->src[i].rel = 0;
			j--;
		}
	}
	return 0;
}

/* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	struct r600_bytecode_alu alu;
	int i, j, k, nliteral, r;

	for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
		if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			nliteral++;
		}
	}
	for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
		if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
			int treg = r600_get_temp(ctx);
			for (k = 0; k < 4; k++) {
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = ctx->src[i].sel;
				alu.src[0].chan = k;
				alu.src[0].value = ctx->src[i].value[k];
				alu.dst.sel = treg;
				alu.dst.chan = k;
				alu.dst.write = 1;
				if (k == 3)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			ctx->src[i].sel = treg;
			j--;
		}
	}
	return 0;
}

static int process_twoside_color_inputs(struct r600_shader_ctx *ctx)
{
	int i, r, count = ctx->shader->ninput;

	for (i = 0; i < count; i++) {
		if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
			r = select_twoside_color(ctx, i, ctx->shader->input[i].back_color_input);
			if (r)
				return r;
		}
	}
	return 0;
}

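/* Emit the MEM_STREAM* CF output instructions that store shader outputs to
 * the streamout buffers. Outputs whose dst_offset starts below their
 * start_component are first repacked down to component 0 with MOVs, since
 * the write mask can only place component N at offset N. */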
static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
			  int stream, unsigned *stream_item_size)
{
	unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
	unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
	int i, j, r;

	/* Sanity checking. */
	if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
		R600_ERR("Too many stream outputs: %d\n", so->num_outputs);
		r = -EINVAL;
		goto out_err;
	}
	for (i = 0; i < so->num_outputs; i++) {
		if (so->output[i].output_buffer >= 4) {
			R600_ERR("Exceeded the max number of stream output buffers, got: %d\n",
				 so->output[i].output_buffer);
			r = -EINVAL;
			goto out_err;
		}
	}

	/* Initialize locations where the outputs are stored. */
	for (i = 0; i < so->num_outputs; i++) {

		so_gpr[i] = ctx->shader->output[so->output[i].register_index].gpr;
		start_comp[i] = so->output[i].start_component;
		/* Lower outputs with dst_offset < start_component.
		 *
		 * We can only output 4D vectors with a write mask, e.g. we can
		 * only output the W component at offset 3, etc. If we want
		 * to store Y, Z, or W at buffer offset 0, we need to use MOV
		 * to move it to X and output X. */
		if (so->output[i].dst_offset < so->output[i].start_component) {
			unsigned tmp = r600_get_temp(ctx);

			for (j = 0; j < so->output[i].num_components; j++) {
				struct r600_bytecode_alu alu;
				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
				alu.op = ALU_OP1_MOV;
				alu.src[0].sel = so_gpr[i];
				alu.src[0].chan = so->output[i].start_component + j;

				alu.dst.sel = tmp;
				alu.dst.chan = j;
				alu.dst.write = 1;
				if (j == so->output[i].num_components - 1)
					alu.last = 1;
				r = r600_bytecode_add_alu(ctx->bc, &alu);
				if (r)
					return r;
			}
			start_comp[i] = 0;
			so_gpr[i] = tmp;
		}
	}

	/* Write outputs to buffers. */
	for (i = 0; i < so->num_outputs; i++) {
		struct r600_bytecode_output output;

		if (stream != -1 && stream != so->output[i].output_buffer)
			continue;

		memset(&output, 0, sizeof(struct r600_bytecode_output));
		output.gpr = so_gpr[i];
		output.elem_size = so->output[i].num_components - 1;
		if (output.elem_size == 2)
			output.elem_size = 3; /* 3 not supported, write 4 with junk at end */
		output.array_base = so->output[i].dst_offset - start_comp[i];
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
		output.burst_count = 1;
		/* array_size is an upper limit for the burst_count
		 * with MEM_STREAM instructions */
		output.array_size = 0xFFF;
		output.comp_mask = ((1 << so->output[i].num_components) - 1) << start_comp[i];

		if (ctx->bc->chip_class >= EVERGREEN) {
			switch (so->output[i].output_buffer) {
			case 0:
				output.op = CF_OP_MEM_STREAM0_BUF0;
				break;
			case 1:
				output.op = CF_OP_MEM_STREAM0_BUF1;
				break;
			case 2:
				output.op = CF_OP_MEM_STREAM0_BUF2;
				break;
			case 3:
				output.op = CF_OP_MEM_STREAM0_BUF3;
				break;
			}
			output.op += so->output[i].stream * 4;
			assert(output.op >= CF_OP_MEM_STREAM0_BUF0 && output.op <= CF_OP_MEM_STREAM3_BUF3);
			ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << so->output[i].stream * 4;
		} else {
			switch (so->output[i].output_buffer) {
			case 0:
				output.op = CF_OP_MEM_STREAM0;
				break;
			case 1:
				output.op = CF_OP_MEM_STREAM1;
				break;
			case 2:
				output.op = CF_OP_MEM_STREAM2;
				break;
			case 3:
				output.op = CF_OP_MEM_STREAM3;
				break;
			}
			ctx->enabled_stream_buffers_mask |= 1 << so->output[i].output_buffer;
		}
		r = r600_bytecode_add_output(ctx->bc, &output);
		if (r)
			goto out_err;
	}
	return 0;
out_err:
	return r;
}

static void convert_edgeflag_to_int(struct r600_shader_ctx *ctx)
{
	struct r600_bytecode_alu alu;
	unsigned reg;

	if (!ctx->shader->vs_out_edgeflag)
		return;

	reg = ctx->shader->output[ctx->edgeflag_output].gpr;

	/* clamp(x, 0, 1) */
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP1_MOV;
	alu.src[0].sel = reg;
	alu.dst.sel = reg;
	alu.dst.write = 1;
	alu.dst.clamp = 1;
	alu.last = 1;
	r600_bytecode_add_alu(ctx->bc, &alu);

	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP1_FLT_TO_INT;
	alu.src[0].sel = reg;
	alu.dst.sel = reg;
	alu.dst.write = 1;
	alu.last = 1;
	r600_bytecode_add_alu(ctx->bc, &alu);
}

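/* Build the "GS copy shader": a small vertex shader that runs after the
 * geometry shader, fetches the vertices the GS wrote to the GSVS ring back
 * with VFETCH, performs any streamout writes, and then does the
 * position/parameter exports a GS cannot do itself. The ring read offset
 * arrives in the low bits of R0.x; the stream ID is in the top two bits and
 * is extracted into R0.y below. */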
static int generate_gs_copy_shader(struct r600_context *rctx,
				   struct r600_pipe_shader *gs,
				   struct pipe_stream_output_info *so)
{
	struct r600_shader_ctx ctx = {};
	struct r600_shader *gs_shader = &gs->shader;
	struct r600_pipe_shader *cshader;
	int ocnt = gs_shader->noutput;
	struct r600_bytecode_alu alu;
	struct r600_bytecode_vtx vtx;
	struct r600_bytecode_output output;
	struct r600_bytecode_cf *cf_jump, *cf_pop,
		*last_exp_pos = NULL, *last_exp_param = NULL;
	int i, j, next_clip_pos = 61, next_param = 0;
	int ring;

	cshader = calloc(1, sizeof(struct r600_pipe_shader));
	if (!cshader)
		return 0;

	memcpy(cshader->shader.output, gs_shader->output, ocnt *
	       sizeof(struct r600_shader_io));

	cshader->shader.noutput = ocnt;

	ctx.shader = &cshader->shader;
	ctx.bc = &ctx.shader->bc;
	ctx.type = ctx.bc->type = TGSI_PROCESSOR_VERTEX;

	r600_bytecode_init(ctx.bc, rctx->b.chip_class, rctx->b.family,
			   rctx->screen->has_compressed_msaa_texturing);

	ctx.bc->isa = rctx->isa;

	cf_jump = NULL;
	memset(cshader->shader.ring_item_sizes, 0, sizeof(cshader->shader.ring_item_sizes));

	/* R0.x = R0.x & 0x3fffffff */
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP2_AND_INT;
	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].value = 0x3fffffff;
	alu.dst.write = 1;
	r600_bytecode_add_alu(ctx.bc, &alu);

	/* R0.y = R0.x >> 30 */
	memset(&alu, 0, sizeof(alu));
	alu.op = ALU_OP2_LSHR_INT;
	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].value = 0x1e;
	alu.dst.chan = 1;
	alu.dst.write = 1;
	alu.last = 1;
	r600_bytecode_add_alu(ctx.bc, &alu);

	/* fetch vertex data from GSVS ring */
	for (i = 0; i < ocnt; ++i) {
		struct r600_shader_io *out = &ctx.shader->output[i];

		out->gpr = i + 1;
		out->ring_offset = i * 16;

		memset(&vtx, 0, sizeof(vtx));
		vtx.op = FETCH_OP_VFETCH;
		vtx.buffer_id = R600_GS_RING_CONST_BUFFER;
		vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
		vtx.offset = out->ring_offset;
		vtx.dst_gpr = out->gpr;
		vtx.src_gpr = 0;
		vtx.dst_sel_x = 0;
		vtx.dst_sel_y = 1;
		vtx.dst_sel_z = 2;
		vtx.dst_sel_w = 3;
		if (rctx->b.chip_class >= EVERGREEN) {
			vtx.use_const_fields = 1;
		} else {
			vtx.data_format = FMT_32_32_32_32_FLOAT;
		}

		r600_bytecode_add_vtx(ctx.bc, &vtx);
	}
	ctx.temp_reg = i + 1;
	for (ring = 3; ring >= 0; --ring) {
		bool enabled = false;
		for (i = 0; i < so->num_outputs; i++) {
			if (so->output[i].stream == ring) {
				enabled = true;
				break;
			}
		}
		if (ring != 0 && !enabled) {
			cshader->shader.ring_item_sizes[ring] = 0;
			continue;
		}

		if (cf_jump) {
			/* patch up the previous ring's jump label */
			r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
			cf_pop = ctx.bc->cf_last;

			cf_jump->cf_addr = cf_pop->id + 2;
			cf_jump->pop_count = 1;
			cf_pop->cf_addr = cf_pop->id + 2;
			cf_pop->pop_count = 1;
		}

		/* PRED_SETE_INT __, R0.y, ring */
		memset(&alu, 0, sizeof(alu));
		alu.op = ALU_OP2_PRED_SETE_INT;
		alu.src[0].chan = 1;
		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
		alu.src[1].value = ring;
		alu.execute_mask = 1;
		alu.update_pred = 1;
		alu.last = 1;
		r600_bytecode_add_alu_type(ctx.bc, &alu, CF_OP_ALU_PUSH_BEFORE);

		r600_bytecode_add_cfinst(ctx.bc, CF_OP_JUMP);
		cf_jump = ctx.bc->cf_last;

		if (enabled)
			emit_streamout(&ctx, so, ring, &cshader->shader.ring_item_sizes[ring]);
		cshader->shader.ring_item_sizes[ring] = ocnt * 16;
	}

	/* export vertex data */
	/* XXX factor out common code with r600_shader_from_tgsi ? */
	for (i = 0; i < ocnt; ++i) {
		struct r600_shader_io *out = &ctx.shader->output[i];
		bool instream0 = true;
		if (out->name == TGSI_SEMANTIC_CLIPVERTEX)
			continue;

		for (j = 0; j < so->num_outputs; j++) {
			if (so->output[j].register_index == i) {
				if (so->output[j].stream == 0)
					break;
				if (so->output[j].stream > 0)
					instream0 = false;
			}
		}
		if (!instream0)
			continue;
		memset(&output, 0, sizeof(output));
		output.gpr = out->gpr;
		output.elem_size = 3;
		output.swizzle_x = 0;
		output.swizzle_y = 1;
		output.swizzle_z = 2;
		output.swizzle_w = 3;
		output.burst_count = 1;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		output.op = CF_OP_EXPORT;
		switch (out->name) {
		case TGSI_SEMANTIC_POSITION:
			output.array_base = 60;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			break;

		case TGSI_SEMANTIC_PSIZE:
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			output.swizzle_y = 7;
			output.swizzle_z = 7;
			output.swizzle_w = 7;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_point_size = 1;
			break;
		case TGSI_SEMANTIC_LAYER:
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			output.swizzle_x = 7;
			output.swizzle_y = 7;
			output.swizzle_z = 0;
			output.swizzle_w = 7;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_layer = 1;
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = 61;
			if (next_clip_pos == 61)
				next_clip_pos = 62;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			ctx.shader->vs_out_misc_write = 1;
			ctx.shader->vs_out_viewport = 1;
			output.swizzle_x = 7;
			output.swizzle_y = 7;
			output.swizzle_z = 7;
			output.swizzle_w = 0;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* spi_sid is 0 for clipdistance outputs that were generated
			 * for clipvertex - we don't need to pass them to PS */
			ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
			if (out->spi_sid) {
				/* duplicate it as PARAM to pass to the pixel shader */
				output.array_base = next_param++;
				r600_bytecode_add_output(ctx.bc, &output);
				last_exp_param = ctx.bc->cf_last;
			}
			output.array_base = next_clip_pos++;
			output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
			break;
		case TGSI_SEMANTIC_FOG:
			output.swizzle_y = 4; /* 0 */
			output.swizzle_z = 4; /* 0 */
			output.swizzle_w = 5; /* 1 */
			break;
		default:
			output.array_base = next_param++;
			break;
		}
		r600_bytecode_add_output(ctx.bc, &output);
		if (output.type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM)
			last_exp_param = ctx.bc->cf_last;
		else
			last_exp_pos = ctx.bc->cf_last;
	}

	if (!last_exp_pos) {
		memset(&output, 0, sizeof(output));
		output.gpr = 0;
		output.elem_size = 3;
		output.swizzle_x = 7;
		output.swizzle_y = 7;
		output.swizzle_z = 7;
		output.swizzle_w = 7;
		output.burst_count = 1;
		output.op = CF_OP_EXPORT;
		output.array_base = 60;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
		r600_bytecode_add_output(ctx.bc, &output);
		last_exp_pos = ctx.bc->cf_last;
	}

	if (!last_exp_param) {
		memset(&output, 0, sizeof(output));
		output.gpr = 0;
		output.elem_size = 3;
		output.swizzle_x = 7;
		output.swizzle_y = 7;
		output.swizzle_z = 7;
		output.swizzle_w = 7;
		output.burst_count = 1;
		output.op = CF_OP_EXPORT;
		output.array_base = next_param++;
		output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
		r600_bytecode_add_output(ctx.bc, &output);
		last_exp_param = ctx.bc->cf_last;
	}

	last_exp_pos->op = CF_OP_EXPORT_DONE;
	last_exp_param->op = CF_OP_EXPORT_DONE;

	r600_bytecode_add_cfinst(ctx.bc, CF_OP_POP);
	cf_pop = ctx.bc->cf_last;

	cf_jump->cf_addr = cf_pop->id + 2;
	cf_jump->pop_count = 1;
	cf_pop->cf_addr = cf_pop->id + 2;
	cf_pop->pop_count = 1;

	if (ctx.bc->chip_class == CAYMAN)
		cm_bytecode_add_cf_end(ctx.bc);
	else {
		r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
		ctx.bc->cf_last->end_of_program = 1;
	}

	gs->gs_copy_shader = cshader;
	cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;

	ctx.bc->nstack = 1;

	return r600_bytecode_build(ctx.bc);
}

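/* Emit the MEM_RING writes that store the current vertex's outputs to the
 * GSVS ring (or, for an ES, to the ESGS ring at the offsets the GS expects).
 * For indirect ('ind') writes the ring address comes from the per-stream
 * export GPR, which is advanced past this vertex afterwards. */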
1785 static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind)
1786 {
1787 struct r600_bytecode_output output;
1788 int i, k, ring_offset;
1789 int effective_stream = stream == -1 ? 0 : stream;
1790 int idx = 0;
1791
1792 for (i = 0; i < ctx->shader->noutput; i++) {
1793 if (ctx->gs_for_vs) {
1794 /* for ES we need to look up the corresponding ring offset expected by GS
1795 * (map this output to a GS input by name and sid) */
1796 /* FIXME precompute offsets */
1797 ring_offset = -1;
1798 for(k = 0; k < ctx->gs_for_vs->ninput; ++k) {
1799 struct r600_shader_io *in = &ctx->gs_for_vs->input[k];
1800 struct r600_shader_io *out = &ctx->shader->output[i];
1801 if (in->name == out->name && in->sid == out->sid)
1802 ring_offset = in->ring_offset;
1803 }
1804
1805 if (ring_offset == -1)
1806 continue;
1807 } else {
1808 ring_offset = idx * 16;
1809 idx++;
1810 }
1811
1812 if (stream > 0 && ctx->shader->output[i].name == TGSI_SEMANTIC_POSITION)
1813 continue;
1814 /* after parsing the input decls, next_ring_offset holds the total size of
1815 * a single vertex's data; gs_next_vertex is the current vertex index */
1816 if (!ind)
1817 ring_offset += ctx->gs_out_ring_offset * ctx->gs_next_vertex;
1818
1819 memset(&output, 0, sizeof(struct r600_bytecode_output));
1820 output.gpr = ctx->shader->output[i].gpr;
1821 output.elem_size = 3;
1822 output.comp_mask = 0xF;
1823 output.burst_count = 1;
1824
1825 if (ind)
1826 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
1827 else
1828 output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
1829
1830 switch (stream) {
1831 default:
1832 case 0:
1833 output.op = CF_OP_MEM_RING; break;
1834 case 1:
1835 output.op = CF_OP_MEM_RING1; break;
1836 case 2:
1837 output.op = CF_OP_MEM_RING2; break;
1838 case 3:
1839 output.op = CF_OP_MEM_RING3; break;
1840 }
1841
1842 if (ind) {
1843 output.array_base = ring_offset >> 2; /* in dwords */
1844 output.array_size = 0xfff;
1845 output.index_gpr = ctx->gs_export_gpr_tregs[effective_stream];
1846 } else
1847 output.array_base = ring_offset >> 2; /* in dwords */
1848 r600_bytecode_add_output(ctx->bc, &output);
1849 }
1850
1851 if (ind) {
1852 /* advance the per-stream export base register by the size of one vertex */
1853 struct r600_bytecode_alu alu;
1854 int r;
1855
1856 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
1857 alu.op = ALU_OP2_ADD_INT;
1858 alu.src[0].sel = ctx->gs_export_gpr_tregs[effective_stream];
1859 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
1860 alu.src[1].value = ctx->gs_out_ring_offset >> 4;
1861 alu.dst.sel = ctx->gs_export_gpr_tregs[effective_stream];
1862 alu.dst.write = 1;
1863 alu.last = 1;
1864 r = r600_bytecode_add_alu(ctx->bc, &alu);
1865 if (r)
1866 return r;
1867 }
1868 ++ctx->gs_next_vertex;
1869 return 0;
1870 }
1871
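/* Translate a TGSI shader into r600 bytecode (or hand it to the LLVM
 * backend when enabled): one parse pass for immediates and declarations,
 * one for instructions, then the export / stream-out epilogue and, for
 * geometry shaders, the copy shader. */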
1872 static int r600_shader_from_tgsi(struct r600_context *rctx,
1873 struct r600_pipe_shader *pipeshader,
1874 union r600_shader_key key)
1875 {
1876 struct r600_screen *rscreen = rctx->screen;
1877 struct r600_shader *shader = &pipeshader->shader;
1878 struct tgsi_token *tokens = pipeshader->selector->tokens;
1879 struct pipe_stream_output_info so = pipeshader->selector->so;
1880 struct tgsi_full_immediate *immediate;
1881 struct r600_shader_ctx ctx;
1882 struct r600_bytecode_output output[32];
1883 unsigned output_done, noutput;
1884 unsigned opcode;
1885 int i, j, k, r = 0;
1886 int next_param_base = 0, next_clip_base;
1887 int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
1888 /* Declarations used by llvm code */
1889 bool use_llvm = false;
1890 bool indirect_gprs;
1891 bool ring_outputs = false;
1892 bool pos_emitted = false;
1893
1894 #ifdef R600_USE_LLVM
1895 use_llvm = rscreen->b.debug_flags & DBG_LLVM;
1896 #endif
1897 ctx.bc = &shader->bc;
1898 ctx.shader = shader;
1899 ctx.native_integers = true;
1900
1901
1902 r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
1903 rscreen->has_compressed_msaa_texturing);
1904 ctx.tokens = tokens;
1905 tgsi_scan_shader(tokens, &ctx.info);
1906 shader->indirect_files = ctx.info.indirect_files;
1907
1908 shader->uses_doubles = ctx.info.uses_doubles;
1909
1910 indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
1911 tgsi_parse_init(&ctx.parse, tokens);
1912 ctx.type = ctx.info.processor;
1913 shader->processor_type = ctx.type;
1914 ctx.bc->type = shader->processor_type;
1915
1916 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
1917 shader->vs_as_gs_a = key.vs.as_gs_a;
1918 shader->vs_as_es = key.vs.as_es;
1919 }
1920
1921 ring_outputs = shader->vs_as_es || ctx.type == TGSI_PROCESSOR_GEOMETRY;
1922
1923 if (shader->vs_as_es) {
1924 ctx.gs_for_vs = &rctx->gs_shader->current->shader;
1925 } else {
1926 ctx.gs_for_vs = NULL;
1927 }
1928
1929 ctx.next_ring_offset = 0;
1930 ctx.gs_out_ring_offset = 0;
1931 ctx.gs_next_vertex = 0;
1932 ctx.gs_stream_output_info = &so;
1933
1934 ctx.face_gpr = -1;
1935 ctx.fixed_pt_position_gpr = -1;
1936 ctx.fragcoord_input = -1;
1937 ctx.colors_used = 0;
1938 ctx.clip_vertex_write = 0;
1939
1940 shader->nr_ps_color_exports = 0;
1941 shader->nr_ps_max_color_exports = 0;
1942
1943 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
1944 shader->two_side = key.ps.color_two_side;
1945
1946 /* register allocations */
1947 /* Values [0,127] correspond to GPR[0..127].
1948 * Values [128,159] correspond to constant buffer bank 0
1949 * Values [160,191] correspond to constant buffer bank 1
1950 * Values [256,511] correspond to cfile constants c[0..255]. (Gone on EG)
1951 * Values [256,287] correspond to constant buffer bank 2 (EG)
1952 * Values [288,319] correspond to constant buffer bank 3 (EG)
1953 * Other special values are shown in the list below.
1954 * 244 ALU_SRC_1_DBL_L: special constant 1.0 double-float, LSW. (RV670+)
1955 * 245 ALU_SRC_1_DBL_M: special constant 1.0 double-float, MSW. (RV670+)
1956 * 246 ALU_SRC_0_5_DBL_L: special constant 0.5 double-float, LSW. (RV670+)
1957 * 247 ALU_SRC_0_5_DBL_M: special constant 0.5 double-float, MSW. (RV670+)
1958 * 248 SQ_ALU_SRC_0: special constant 0.0.
1959 * 249 SQ_ALU_SRC_1: special constant 1.0 float.
1960 * 250 SQ_ALU_SRC_1_INT: special constant 1 integer.
1961 * 251 SQ_ALU_SRC_M_1_INT: special constant -1 integer.
1962 * 252 SQ_ALU_SRC_0_5: special constant 0.5 float.
1963 * 253 SQ_ALU_SRC_LITERAL: literal constant.
1964 * 254 SQ_ALU_SRC_PV: previous vector result.
1965 * 255 SQ_ALU_SRC_PS: previous scalar result.
1966 */
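/* Illustration (mirrors the pattern used throughout this file): sourcing
 * the literal 1.0f uses sel 253 plus an inline bit pattern, e.g.
 *
 *   alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
 *   alu.src[1].value = 0x3f800000;   // IEEE-754 bits of 1.0f
 */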
1967 for (i = 0; i < TGSI_FILE_COUNT; i++) {
1968 ctx.file_offset[i] = 0;
1969 }
1970
1971 #ifdef R600_USE_LLVM
1972 if (use_llvm && ctx.info.indirect_files && (ctx.info.indirect_files & (1 << TGSI_FILE_CONSTANT)) != ctx.info.indirect_files) {
1973 fprintf(stderr, "Warning: R600 LLVM backend does not support "
1974 "indirect adressing. Falling back to TGSI "
1975 "backend.\n");
1976 use_llvm = 0;
1977 }
1978 #endif
1979 if (ctx.type == TGSI_PROCESSOR_VERTEX) {
1980 ctx.file_offset[TGSI_FILE_INPUT] = 1;
1981 if (!use_llvm) {
1982 r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
1983 }
1984 }
1985 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
1986 if (ctx.bc->chip_class >= EVERGREEN)
1987 ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
1988 else
1989 ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
1990 }
1991 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
1992 /* FIXME 1 would be enough in some cases (3 or fewer input vertices) */
1993 ctx.file_offset[TGSI_FILE_INPUT] = 2;
1994 }
1995 ctx.use_llvm = use_llvm;
1996
1997 if (use_llvm) {
1998 ctx.file_offset[TGSI_FILE_OUTPUT] =
1999 ctx.file_offset[TGSI_FILE_INPUT];
2000 } else {
2001 ctx.file_offset[TGSI_FILE_OUTPUT] =
2002 ctx.file_offset[TGSI_FILE_INPUT] +
2003 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2004 }
2005 ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] +
2006 ctx.info.file_max[TGSI_FILE_OUTPUT] + 1;
2007
2008 /* Outside the GPR range. This will be translated to one of the
2009 * kcache banks later. */
2010 ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
2011
2012 ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
2013 ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
2014 ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
2015 ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
2016 ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
2017
2018 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2019 ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
2020 ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
2021 ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
2022 ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
2023 ctx.temp_reg = ctx.bc->ar_reg + 7;
2024 } else {
2025 ctx.temp_reg = ctx.bc->ar_reg + 3;
2026 }
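/* Scratch GPRs now sit directly after the TGSI temporaries:
 * ar_reg, index_reg[0..1], then (GS only) gs_export_gpr_tregs[0..3],
 * and finally temp_reg for driver-internal temporaries. */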
2027
2028 shader->max_arrays = 0;
2029 shader->num_arrays = 0;
2030 if (indirect_gprs) {
2031
2032 if (ctx.info.indirect_files & (1 << TGSI_FILE_INPUT)) {
2033 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_INPUT],
2034 ctx.file_offset[TGSI_FILE_OUTPUT] -
2035 ctx.file_offset[TGSI_FILE_INPUT],
2036 0x0F);
2037 }
2038 if (ctx.info.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2039 r600_add_gpr_array(shader, ctx.file_offset[TGSI_FILE_OUTPUT],
2040 ctx.file_offset[TGSI_FILE_TEMPORARY] -
2041 ctx.file_offset[TGSI_FILE_OUTPUT],
2042 0x0F);
2043 }
2044 }
2045
2046 ctx.nliterals = 0;
2047 ctx.literals = NULL;
2048
2049 shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
2050 shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
2051
2052 if (shader->vs_as_gs_a)
2053 vs_add_primid_output(&ctx, key.vs.prim_id_out);
2054
2055 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2056 tgsi_parse_token(&ctx.parse);
2057 switch (ctx.parse.FullToken.Token.Type) {
2058 case TGSI_TOKEN_TYPE_IMMEDIATE:
2059 immediate = &ctx.parse.FullToken.FullImmediate;
2060 ctx.literals = realloc(ctx.literals, (ctx.nliterals + 1) * 16);
2061 if(ctx.literals == NULL) {
2062 r = -ENOMEM;
2063 goto out_err;
2064 }
2065 ctx.literals[ctx.nliterals * 4 + 0] = immediate->u[0].Uint;
2066 ctx.literals[ctx.nliterals * 4 + 1] = immediate->u[1].Uint;
2067 ctx.literals[ctx.nliterals * 4 + 2] = immediate->u[2].Uint;
2068 ctx.literals[ctx.nliterals * 4 + 3] = immediate->u[3].Uint;
2069 ctx.nliterals++;
2070 break;
2071 case TGSI_TOKEN_TYPE_DECLARATION:
2072 r = tgsi_declaration(&ctx);
2073 if (r)
2074 goto out_err;
2075 break;
2076 case TGSI_TOKEN_TYPE_INSTRUCTION:
2077 case TGSI_TOKEN_TYPE_PROPERTY:
2078 break;
2079 default:
2080 R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
2081 r = -EINVAL;
2082 goto out_err;
2083 }
2084 }
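/* First pass done: immediates and declarations are collected above;
 * instruction tokens are translated in the second parse pass below. */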
2085
2086 shader->ring_item_sizes[0] = ctx.next_ring_offset;
2087 shader->ring_item_sizes[1] = 0;
2088 shader->ring_item_sizes[2] = 0;
2089 shader->ring_item_sizes[3] = 0;
2090
2091 /* Process two-sided color if needed */
2092 if (shader->two_side && ctx.colors_used) {
2093 int i, count = ctx.shader->ninput;
2094 unsigned next_lds_loc = ctx.shader->nlds;
2095
2096 /* additional inputs will be allocated right after the existing inputs;
2097 * we won't need them after the color selection, so we don't have to
2098 * reserve these gprs for the rest of the shader code or adjust
2099 * output offsets etc. */
2100 int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
2101 ctx.info.file_max[TGSI_FILE_INPUT] + 1;
2102
2103 /* if two-sided and neither the face nor the sample mask is used by the shader, ensure face_gpr is emitted */
2104 if (ctx.face_gpr == -1) {
2105 i = ctx.shader->ninput++;
2106 ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
2107 ctx.shader->input[i].spi_sid = 0;
2108 ctx.shader->input[i].gpr = gpr++;
2109 ctx.face_gpr = ctx.shader->input[i].gpr;
2110 }
2111
2112 for (i = 0; i < count; i++) {
2113 if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
2114 int ni = ctx.shader->ninput++;
2115 memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
2116 ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
2117 ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
2118 ctx.shader->input[ni].gpr = gpr++;
2119 // TGSI to LLVM needs to know the lds position of inputs.
2120 // The non-LLVM path computes it later (in process_twoside_color).
2121 ctx.shader->input[ni].lds_pos = next_lds_loc++;
2122 ctx.shader->input[i].back_color_input = ni;
2123 if (ctx.bc->chip_class >= EVERGREEN) {
2124 if ((r = evergreen_interp_input(&ctx, ni)))
2125 return r;
2126 }
2127 }
2128 }
2129 }
2130
2131 /* LLVM backend setup */
2132 #ifdef R600_USE_LLVM
2133 if (use_llvm) {
2134 struct radeon_llvm_context radeon_llvm_ctx;
2135 LLVMModuleRef mod;
2136 bool dump = r600_can_dump_shader(&rscreen->b, tokens);
2137 boolean use_kill = false;
2138
2139 memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
2140 radeon_llvm_ctx.type = ctx.type;
2141 radeon_llvm_ctx.two_side = shader->two_side;
2142 radeon_llvm_ctx.face_gpr = ctx.face_gpr;
2143 radeon_llvm_ctx.inputs_count = ctx.shader->ninput + 1;
2144 radeon_llvm_ctx.r600_inputs = ctx.shader->input;
2145 radeon_llvm_ctx.r600_outputs = ctx.shader->output;
2146 radeon_llvm_ctx.color_buffer_count = max_color_exports;
2147 radeon_llvm_ctx.chip_class = ctx.bc->chip_class;
2148 radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN);
2149 radeon_llvm_ctx.stream_outputs = &so;
2150 radeon_llvm_ctx.alpha_to_one = key.ps.alpha_to_one;
2151 radeon_llvm_ctx.has_compressed_msaa_texturing =
2152 ctx.bc->has_compressed_msaa_texturing;
2153 mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
2154 ctx.shader->has_txq_cube_array_z_comp = radeon_llvm_ctx.has_txq_cube_array_z_comp;
2155 ctx.shader->uses_tex_buffers = radeon_llvm_ctx.uses_tex_buffers;
2156
2157 if (r600_llvm_compile(mod, rscreen->b.family, ctx.bc, &use_kill, dump)) {
2158 radeon_llvm_dispose(&radeon_llvm_ctx);
2159 use_llvm = 0;
2160 fprintf(stderr, "R600 LLVM backend failed to compile "
2161 "shader. Falling back to TGSI\n");
2162 } else {
2163 ctx.file_offset[TGSI_FILE_OUTPUT] =
2164 ctx.file_offset[TGSI_FILE_INPUT];
2165 }
2166 if (use_kill)
2167 ctx.shader->uses_kill = use_kill;
2168 radeon_llvm_dispose(&radeon_llvm_ctx);
2169 }
2170 #endif
2171 /* End of LLVM backend setup */
2172
2173 if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
2174 shader->nr_ps_max_color_exports = 8;
2175
2176 if (!use_llvm) {
2177 if (ctx.fragcoord_input >= 0) {
2178 if (ctx.bc->chip_class == CAYMAN) {
2179 for (j = 0 ; j < 4; j++) {
2180 struct r600_bytecode_alu alu;
2181 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2182 alu.op = ALU_OP1_RECIP_IEEE;
2183 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2184 alu.src[0].chan = 3;
2185
2186 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2187 alu.dst.chan = j;
2188 alu.dst.write = (j == 3);
2189 alu.last = 1;
2190 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2191 return r;
2192 }
2193 } else {
2194 struct r600_bytecode_alu alu;
2195 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2196 alu.op = ALU_OP1_RECIP_IEEE;
2197 alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr;
2198 alu.src[0].chan = 3;
2199
2200 alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
2201 alu.dst.chan = 3;
2202 alu.dst.write = 1;
2203 alu.last = 1;
2204 if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
2205 return r;
2206 }
2207 }
2208
2209 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2210 struct r600_bytecode_alu alu;
2211 int r;
2212 for (j = 0; j < 4; j++) {
2213 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2214 alu.op = ALU_OP1_MOV;
2215 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
2216 alu.src[0].value = 0;
2217 alu.dst.sel = ctx.gs_export_gpr_tregs[j];
2218 alu.dst.write = 1;
2219 alu.last = 1;
2220 r = r600_bytecode_add_alu(ctx.bc, &alu);
2221 if (r)
2222 return r;
2223 }
2224 }
2225 if (shader->two_side && ctx.colors_used) {
2226 if ((r = process_twoside_color_inputs(&ctx)))
2227 return r;
2228 }
2229
2230 tgsi_parse_init(&ctx.parse, tokens);
2231 while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
2232 tgsi_parse_token(&ctx.parse);
2233 switch (ctx.parse.FullToken.Token.Type) {
2234 case TGSI_TOKEN_TYPE_INSTRUCTION:
2235 r = tgsi_is_supported(&ctx);
2236 if (r)
2237 goto out_err;
2238 ctx.max_driver_temp_used = 0;
2239 /* reserve first tmp for everyone */
2240 r600_get_temp(&ctx);
2241
2242 opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
2243 if ((r = tgsi_split_constant(&ctx)))
2244 goto out_err;
2245 if ((r = tgsi_split_literal_constant(&ctx)))
2246 goto out_err;
2247 if (ctx.type == TGSI_PROCESSOR_GEOMETRY)
2248 if ((r = tgsi_split_gs_inputs(&ctx)))
2249 goto out_err;
2250 if (ctx.bc->chip_class == CAYMAN)
2251 ctx.inst_info = &cm_shader_tgsi_instruction[opcode];
2252 else if (ctx.bc->chip_class >= EVERGREEN)
2253 ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
2254 else
2255 ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
2256 r = ctx.inst_info->process(&ctx);
2257 if (r)
2258 goto out_err;
2259 break;
2260 default:
2261 break;
2262 }
2263 }
2264 }
2265
2266 /* Reset the temporary register counter. */
2267 ctx.max_driver_temp_used = 0;
2268
2269 noutput = shader->noutput;
2270
2271 if (!ring_outputs && ctx.clip_vertex_write) {
2272 unsigned clipdist_temp[2];
2273
2274 clipdist_temp[0] = r600_get_temp(&ctx);
2275 clipdist_temp[1] = r600_get_temp(&ctx);
2276
2277 /* need to convert a clipvertex write into clipdistance writes and not export
2278 the clip vertex anymore */
2279
2280 memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io));
2281 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2282 shader->output[noutput].gpr = clipdist_temp[0];
2283 noutput++;
2284 shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST;
2285 shader->output[noutput].gpr = clipdist_temp[1];
2286 noutput++;
2287
2288 /* reset spi_sid for clipvertex output to avoid confusing spi */
2289 shader->output[ctx.cv_output].spi_sid = 0;
2290
2291 shader->clip_dist_write = 0xFF;
2292
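/* Each clip distance is dot(clipvertex, user_clip_plane[i]); the planes
 * are read from the buffer-info constant buffer via kcache sels 512+i. */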
2293 for (i = 0; i < 8; i++) {
2294 int oreg = i >> 2;
2295 int ochan = i & 3;
2296
2297 for (j = 0; j < 4; j++) {
2298 struct r600_bytecode_alu alu;
2299 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2300 alu.op = ALU_OP2_DOT4;
2301 alu.src[0].sel = shader->output[ctx.cv_output].gpr;
2302 alu.src[0].chan = j;
2303
2304 alu.src[1].sel = 512 + i;
2305 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
2306 alu.src[1].chan = j;
2307
2308 alu.dst.sel = clipdist_temp[oreg];
2309 alu.dst.chan = j;
2310 alu.dst.write = (j == ochan);
2311 if (j == 3)
2312 alu.last = 1;
2313 if (!use_llvm)
2314 r = r600_bytecode_add_alu(ctx.bc, &alu);
2315 if (r)
2316 return r;
2317 }
2318 }
2319 }
2320
2321 /* Add stream outputs. */
2322 if (!ring_outputs && ctx.type == TGSI_PROCESSOR_VERTEX &&
2323 so.num_outputs && !use_llvm)
2324 emit_streamout(&ctx, &so, -1, NULL);
2325
2326 pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
2327 convert_edgeflag_to_int(&ctx);
2328
2329 if (ring_outputs) {
2330 if (shader->vs_as_es) {
2331 ctx.gs_export_gpr_tregs[0] = r600_get_temp(&ctx);
2332 ctx.gs_export_gpr_tregs[1] = -1;
2333 ctx.gs_export_gpr_tregs[2] = -1;
2334 ctx.gs_export_gpr_tregs[3] = -1;
2335
2336 emit_gs_ring_writes(&ctx, &so, -1, FALSE);
2337 }
2338 } else {
2339 /* Export output */
2340 next_clip_base = shader->vs_out_misc_write ? 62 : 61;
2341
2342 for (i = 0, j = 0; i < noutput; i++, j++) {
2343 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2344 output[j].gpr = shader->output[i].gpr;
2345 output[j].elem_size = 3;
2346 output[j].swizzle_x = 0;
2347 output[j].swizzle_y = 1;
2348 output[j].swizzle_z = 2;
2349 output[j].swizzle_w = 3;
2350 output[j].burst_count = 1;
2351 output[j].type = -1;
2352 output[j].op = CF_OP_EXPORT;
2353 switch (ctx.type) {
2354 case TGSI_PROCESSOR_VERTEX:
2355 switch (shader->output[i].name) {
2356 case TGSI_SEMANTIC_POSITION:
2357 output[j].array_base = 60;
2358 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2359 pos_emitted = true;
2360 break;
2361
2362 case TGSI_SEMANTIC_PSIZE:
2363 output[j].array_base = 61;
2364 output[j].swizzle_y = 7;
2365 output[j].swizzle_z = 7;
2366 output[j].swizzle_w = 7;
2367 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2368 pos_emitted = true;
2369 break;
2370 case TGSI_SEMANTIC_EDGEFLAG:
2371 output[j].array_base = 61;
2372 output[j].swizzle_x = 7;
2373 output[j].swizzle_y = 0;
2374 output[j].swizzle_z = 7;
2375 output[j].swizzle_w = 7;
2376 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2377 pos_emitted = true;
2378 break;
2379 case TGSI_SEMANTIC_LAYER:
2380 /* spi_sid is 0 for outputs that are
2381 * not consumed by PS */
2382 if (shader->output[i].spi_sid) {
2383 output[j].array_base = next_param_base++;
2384 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2385 j++;
2386 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2387 }
2388 output[j].array_base = 61;
2389 output[j].swizzle_x = 7;
2390 output[j].swizzle_y = 7;
2391 output[j].swizzle_z = 0;
2392 output[j].swizzle_w = 7;
2393 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2394 pos_emitted = true;
2395 break;
2396 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2397 /* spi_sid is 0 for outputs that are
2398 * not consumed by PS */
2399 if (shader->output[i].spi_sid) {
2400 output[j].array_base = next_param_base++;
2401 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2402 j++;
2403 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2404 }
2405 output[j].array_base = 61;
2406 output[j].swizzle_x = 7;
2407 output[j].swizzle_y = 7;
2408 output[j].swizzle_z = 7;
2409 output[j].swizzle_w = 0;
2410 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2411 pos_emitted = true;
2412 break;
2413 case TGSI_SEMANTIC_CLIPVERTEX:
2414 j--;
2415 break;
2416 case TGSI_SEMANTIC_CLIPDIST:
2417 output[j].array_base = next_clip_base++;
2418 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2419 pos_emitted = true;
2420 /* spi_sid is 0 for clipdistance outputs that were generated
2421 * for clipvertex - we don't need to pass them to PS */
2422 if (shader->output[i].spi_sid) {
2423 j++;
2424 /* duplicate it as PARAM to pass to the pixel shader */
2425 memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output));
2426 output[j].array_base = next_param_base++;
2427 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2428 }
2429 break;
2430 case TGSI_SEMANTIC_FOG:
2431 output[j].swizzle_y = 4; /* 0 */
2432 output[j].swizzle_z = 4; /* 0 */
2433 output[j].swizzle_w = 5; /* 1 */
2434 break;
2435 case TGSI_SEMANTIC_PRIMID:
2436 output[j].swizzle_x = 2;
2437 output[j].swizzle_y = 4; /* 0 */
2438 output[j].swizzle_z = 4; /* 0 */
2439 output[j].swizzle_w = 4; /* 0 */
2440 break;
2441 }
2442
2443 break;
2444 case TGSI_PROCESSOR_FRAGMENT:
2445 if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
2446 /* never export more colors than the number of CBs */
2447 if (shader->output[i].sid >= max_color_exports) {
2448 /* skip export */
2449 j--;
2450 continue;
2451 }
2452 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2453 output[j].array_base = shader->output[i].sid;
2454 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2455 shader->nr_ps_color_exports++;
2456 if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
2457 for (k = 1; k < max_color_exports; k++) {
2458 j++;
2459 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2460 output[j].gpr = shader->output[i].gpr;
2461 output[j].elem_size = 3;
2462 output[j].swizzle_x = 0;
2463 output[j].swizzle_y = 1;
2464 output[j].swizzle_z = 2;
2465 output[j].swizzle_w = key.ps.alpha_to_one ? 5 : 3;
2466 output[j].burst_count = 1;
2467 output[j].array_base = k;
2468 output[j].op = CF_OP_EXPORT;
2469 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2470 shader->nr_ps_color_exports++;
2471 }
2472 }
2473 } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
2474 output[j].array_base = 61;
2475 output[j].swizzle_x = 2;
2476 output[j].swizzle_y = 7;
2477 output[j].swizzle_z = output[j].swizzle_w = 7;
2478 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2479 } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) {
2480 output[j].array_base = 61;
2481 output[j].swizzle_x = 7;
2482 output[j].swizzle_y = 1;
2483 output[j].swizzle_z = output[j].swizzle_w = 7;
2484 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2485 } else if (shader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
2486 output[j].array_base = 61;
2487 output[j].swizzle_x = 7;
2488 output[j].swizzle_y = 7;
2489 output[j].swizzle_z = 0;
2490 output[j].swizzle_w = 7;
2491 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2492 } else {
2493 R600_ERR("unsupported fragment output name %d\n", shader->output[i].name);
2494 r = -EINVAL;
2495 goto out_err;
2496 }
2497 break;
2498 default:
2499 R600_ERR("unsupported processor type %d\n", ctx.type);
2500 r = -EINVAL;
2501 goto out_err;
2502 }
2503
2504 if (output[j].type == -1) {
2505 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2506 output[j].array_base = next_param_base++;
2507 }
2508 }
2509
2510 /* add fake position export */
2511 if (ctx.type == TGSI_PROCESSOR_VERTEX && pos_emitted == false) {
2512 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2513 output[j].gpr = 0;
2514 output[j].elem_size = 3;
2515 output[j].swizzle_x = 7;
2516 output[j].swizzle_y = 7;
2517 output[j].swizzle_z = 7;
2518 output[j].swizzle_w = 7;
2519 output[j].burst_count = 1;
2520 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS;
2521 output[j].array_base = 60;
2522 output[j].op = CF_OP_EXPORT;
2523 j++;
2524 }
2525
2526 /* add fake param output for vertex shader if no param is exported */
2527 if (ctx.type == TGSI_PROCESSOR_VERTEX && next_param_base == 0) {
2528 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2529 output[j].gpr = 0;
2530 output[j].elem_size = 3;
2531 output[j].swizzle_x = 7;
2532 output[j].swizzle_y = 7;
2533 output[j].swizzle_z = 7;
2534 output[j].swizzle_w = 7;
2535 output[j].burst_count = 1;
2536 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
2537 output[j].array_base = 0;
2538 output[j].op = CF_OP_EXPORT;
2539 j++;
2540 }
2541
2542 /* add fake pixel export */
2543 if (ctx.type == TGSI_PROCESSOR_FRAGMENT && shader->nr_ps_color_exports == 0) {
2544 memset(&output[j], 0, sizeof(struct r600_bytecode_output));
2545 output[j].gpr = 0;
2546 output[j].elem_size = 3;
2547 output[j].swizzle_x = 7;
2548 output[j].swizzle_y = 7;
2549 output[j].swizzle_z = 7;
2550 output[j].swizzle_w = 7;
2551 output[j].burst_count = 1;
2552 output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
2553 output[j].array_base = 0;
2554 output[j].op = CF_OP_EXPORT;
2555 j++;
2556 shader->nr_ps_color_exports++;
2557 }
2558
2559 noutput = j;
2560
2561 /* set export done on last export of each type */
2562 for (i = noutput - 1, output_done = 0; i >= 0; i--) {
2563 if (!(output_done & (1 << output[i].type))) {
2564 output_done |= (1 << output[i].type);
2565 output[i].op = CF_OP_EXPORT_DONE;
2566 }
2567 }
2568 /* add output to bytecode */
2569 if (!use_llvm) {
2570 for (i = 0; i < noutput; i++) {
2571 r = r600_bytecode_add_output(ctx.bc, &output[i]);
2572 if (r)
2573 goto out_err;
2574 }
2575 }
2576 }
2577
2578 /* add program end */
2579 if (!use_llvm) {
2580 if (ctx.bc->chip_class == CAYMAN)
2581 cm_bytecode_add_cf_end(ctx.bc);
2582 else {
2583 const struct cf_op_info *last = NULL;
2584
2585 if (ctx.bc->cf_last)
2586 last = r600_isa_cf(ctx.bc->cf_last->op);
2587
2588 /* alu clause instructions don't have EOP bit, so add NOP */
2589 if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_CALL_FS)
2590 r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
2591
2592 ctx.bc->cf_last->end_of_program = 1;
2593 }
2594 }
2595
2596 /* check GPR limit - we have 124 = 128 - 4
2597 * (4 are reserved as alu clause temporary registers) */
2598 if (ctx.bc->ngpr > 124) {
2599 R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr);
2600 r = -ENOMEM;
2601 goto out_err;
2602 }
2603
2604 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
2605 if ((r = generate_gs_copy_shader(rctx, pipeshader, &so)))
2606 return r;
2607 }
2608
2609 free(ctx.literals);
2610 tgsi_parse_free(&ctx.parse);
2611 return 0;
2612 out_err:
2613 free(ctx.literals);
2614 tgsi_parse_free(&ctx.parse);
2615 return r;
2616 }
2617
2618 static int tgsi_unsupported(struct r600_shader_ctx *ctx)
2619 {
2620 const unsigned tgsi_opcode =
2621 ctx->parse.FullToken.FullInstruction.Instruction.Opcode;
2622 R600_ERR("%s tgsi opcode unsupported\n",
2623 tgsi_get_opcode_name(tgsi_opcode));
2624 return -EINVAL;
2625 }
2626
2627 static int tgsi_end(struct r600_shader_ctx *ctx)
2628 {
2629 return 0;
2630 }
2631
2632 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
2633 const struct r600_shader_src *shader_src,
2634 unsigned chan)
2635 {
2636 bc_src->sel = shader_src->sel;
2637 bc_src->chan = shader_src->swizzle[chan];
2638 bc_src->neg = shader_src->neg;
2639 bc_src->abs = shader_src->abs;
2640 bc_src->rel = shader_src->rel;
2641 bc_src->value = shader_src->value[bc_src->chan];
2642 bc_src->kc_bank = shader_src->kc_bank;
2643 bc_src->kc_rel = shader_src->kc_rel;
2644 }
2645
2646 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
2647 {
2648 bc_src->abs = 1;
2649 bc_src->neg = 0;
2650 }
2651
2652 static void r600_bytecode_src_toggle_neg(struct r600_bytecode_alu_src *bc_src)
2653 {
2654 bc_src->neg = !bc_src->neg;
2655 }
2656
2657 static void tgsi_dst(struct r600_shader_ctx *ctx,
2658 const struct tgsi_full_dst_register *tgsi_dst,
2659 unsigned swizzle,
2660 struct r600_bytecode_alu_dst *r600_dst)
2661 {
2662 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2663
2664 r600_dst->sel = tgsi_dst->Register.Index;
2665 r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
2666 r600_dst->chan = swizzle;
2667 r600_dst->write = 1;
2668 if (tgsi_dst->Register.Indirect)
2669 r600_dst->rel = V_SQ_REL_RELATIVE;
2670 if (inst->Instruction.Saturate) {
2671 r600_dst->clamp = 1;
2672 }
2673 }
2674
2675 static int tgsi_last_instruction(unsigned writemask)
2676 {
2677 int i, lasti = 0;
2678
2679 for (i = 0; i < 4; i++) {
2680 if (writemask & (1 << i)) {
2681 lasti = i;
2682 }
2683 }
2684 return lasti;
2685 }
2686
2687
2688
2689 static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap)
2690 {
2691 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2692 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2693 struct r600_bytecode_alu alu;
2694 int i, j, r, lasti = tgsi_last_instruction(write_mask);
2695 int use_tmp = 0;
2696
2697 if (singledest) {
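/* A double occupies a channel pair, so widen a single-component
 * mask to its pair: x->xy, z->zw.  For y/w the scalar result
 * lands in a temp channel (use_tmp - 1) and is MOVed to the
 * real destination afterwards. */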
2698 switch (write_mask) {
2699 case 0x1:
2700 write_mask = 0x3;
2701 break;
2702 case 0x2:
2703 use_tmp = 1;
2704 write_mask = 0x3;
2705 break;
2706 case 0x4:
2707 write_mask = 0xc;
2708 break;
2709 case 0x8:
2710 write_mask = 0xc;
2711 use_tmp = 3;
2712 break;
2713 }
2714 }
2715
2716 lasti = tgsi_last_instruction(write_mask);
2717 for (i = 0; i <= lasti; i++) {
2718
2719 if (!(write_mask & (1 << i)))
2720 continue;
2721
2722 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2723
2724 if (singledest) {
2725 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2726 if (use_tmp) {
2727 alu.dst.sel = ctx->temp_reg;
2728 alu.dst.chan = i;
2729 alu.dst.write = 1;
2730 }
2731 if (i == 1 || i == 3)
2732 alu.dst.write = 0;
2733 } else
2734 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2735
2736 alu.op = ctx->inst_info->op;
2737 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
2738 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2739 } else if (!swap) {
2740 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2741 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
2742 }
2743 } else {
2744 r600_bytecode_src(&alu.src[0], &ctx->src[1], fp64_switch(i));
2745 r600_bytecode_src(&alu.src[1], &ctx->src[0], fp64_switch(i));
2746 }
2747
2748 /* handle some special cases */
2749 if (i == 1 || i == 3) {
2750 switch (ctx->parse.FullToken.FullInstruction.Instruction.Opcode) {
2751 case TGSI_OPCODE_SUB:
2752 r600_bytecode_src_toggle_neg(&alu.src[1]);
2753 break;
2754 case TGSI_OPCODE_DABS:
2755 r600_bytecode_src_set_abs(&alu.src[0]);
2756 break;
2757 default:
2758 break;
2759 }
2760 }
2761 if (i == lasti) {
2762 alu.last = 1;
2763 }
2764 r = r600_bytecode_add_alu(ctx->bc, &alu);
2765 if (r)
2766 return r;
2767 }
2768
2769 if (use_tmp) {
2770 write_mask = inst->Dst[0].Register.WriteMask;
2771
2772 /* move result from temp to dst */
2773 for (i = 0; i <= lasti; i++) {
2774 if (!(write_mask & (1 << i)))
2775 continue;
2776
2777 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2778 alu.op = ALU_OP1_MOV;
2779 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2780 alu.src[0].sel = ctx->temp_reg;
2781 alu.src[0].chan = use_tmp - 1;
2782 alu.last = (i == lasti);
2783
2784 r = r600_bytecode_add_alu(ctx->bc, &alu);
2785 if (r)
2786 return r;
2787 }
2788 }
2789 return 0;
2790 }
2791
2792 static int tgsi_op2_64(struct r600_shader_ctx *ctx)
2793 {
2794 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2795 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2796 /* confirm writemasking */
2797 if ((write_mask & 0x3) != 0x3 &&
2798 (write_mask & 0xc) != 0xc) {
2799 fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
2800 return -1;
2801 }
2802 return tgsi_op2_64_params(ctx, false, false);
2803 }
2804
2805 static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
2806 {
2807 return tgsi_op2_64_params(ctx, true, false);
2808 }
2809
2810 static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
2811 {
2812 return tgsi_op2_64_params(ctx, true, true);
2813 }
2814
2815 static int tgsi_op3_64(struct r600_shader_ctx *ctx)
2816 {
2817 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2818 struct r600_bytecode_alu alu;
2819 int i, j, r;
2820 int lasti = 3;
2821 int tmp = r600_get_temp(ctx);
2822
2823 for (i = 0; i < lasti + 1; i++) {
2824
2825 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2826 alu.op = ctx->inst_info->op;
2827 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2828 r600_bytecode_src(&alu.src[j], &ctx->src[j], i == 3 ? 0 : 1);
2829 }
2830
2831 if (inst->Dst[0].Register.WriteMask & (1 << i))
2832 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2833 else
2834 alu.dst.sel = tmp;
2835
2836 alu.dst.chan = i;
2837 alu.is_op3 = 1;
2838 if (i == lasti) {
2839 alu.last = 1;
2840 }
2841 r = r600_bytecode_add_alu(ctx->bc, &alu);
2842 if (r)
2843 return r;
2844 }
2845 return 0;
2846 }
2847
2848 static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only)
2849 {
2850 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2851 struct r600_bytecode_alu alu;
2852 unsigned write_mask = inst->Dst[0].Register.WriteMask;
2853 int i, j, r, lasti = tgsi_last_instruction(write_mask);
2854 /* use temp register if trans_only and more than one dst component */
2855 int use_tmp = trans_only && (write_mask ^ (1 << lasti));
2856
2857 for (i = 0; i <= lasti; i++) {
2858 if (!(write_mask & (1 << i)))
2859 continue;
2860
2861 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2862 if (use_tmp) {
2863 alu.dst.sel = ctx->temp_reg;
2864 alu.dst.chan = i;
2865 alu.dst.write = 1;
2866 } else
2867 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2868
2869 alu.op = ctx->inst_info->op;
2870 if (!swap) {
2871 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
2872 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
2873 }
2874 } else {
2875 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
2876 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2877 }
2878 /* handle some special cases */
2879 switch (inst->Instruction.Opcode) {
2880 case TGSI_OPCODE_SUB:
2881 r600_bytecode_src_toggle_neg(&alu.src[1]);
2882 break;
2883 case TGSI_OPCODE_ABS:
2884 r600_bytecode_src_set_abs(&alu.src[0]);
2885 break;
2886 default:
2887 break;
2888 }
2889 if (i == lasti || trans_only) {
2890 alu.last = 1;
2891 }
2892 r = r600_bytecode_add_alu(ctx->bc, &alu);
2893 if (r)
2894 return r;
2895 }
2896
2897 if (use_tmp) {
2898 /* move result from temp to dst */
2899 for (i = 0; i <= lasti; i++) {
2900 if (!(write_mask & (1 << i)))
2901 continue;
2902
2903 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2904 alu.op = ALU_OP1_MOV;
2905 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2906 alu.src[0].sel = ctx->temp_reg;
2907 alu.src[0].chan = i;
2908 alu.last = (i == lasti);
2909
2910 r = r600_bytecode_add_alu(ctx->bc, &alu);
2911 if (r)
2912 return r;
2913 }
2914 }
2915 return 0;
2916 }
2917
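/* Wrappers around tgsi_op2_s: "swap" reverses the two source operands,
 * "trans" marks ops restricted to the transcendental slot, which forces
 * one instruction group per written component. */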
2918 static int tgsi_op2(struct r600_shader_ctx *ctx)
2919 {
2920 return tgsi_op2_s(ctx, 0, 0);
2921 }
2922
2923 static int tgsi_op2_swap(struct r600_shader_ctx *ctx)
2924 {
2925 return tgsi_op2_s(ctx, 1, 0);
2926 }
2927
2928 static int tgsi_op2_trans(struct r600_shader_ctx *ctx)
2929 {
2930 return tgsi_op2_s(ctx, 0, 1);
2931 }
2932
2933 static int tgsi_ineg(struct r600_shader_ctx *ctx)
2934 {
2935 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2936 struct r600_bytecode_alu alu;
2937 int i, r;
2938 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2939
2940 for (i = 0; i < lasti + 1; i++) {
2941
2942 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2943 continue;
2944 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2945 alu.op = ctx->inst_info->op;
2946
2947 alu.src[0].sel = V_SQ_ALU_SRC_0;
2948
2949 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
2950
2951 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2952
2953 if (i == lasti) {
2954 alu.last = 1;
2955 }
2956 r = r600_bytecode_add_alu(ctx->bc, &alu);
2957 if (r)
2958 return r;
2959 }
2960 return 0;
2961
2962 }
2963
2964 static int tgsi_dneg(struct r600_shader_ctx *ctx)
2965 {
2966 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2967 struct r600_bytecode_alu alu;
2968 int i, r;
2969 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
2970
2971 for (i = 0; i < lasti + 1; i++) {
2972
2973 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
2974 continue;
2975 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
2976 alu.op = ALU_OP1_MOV;
2977
2978 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
2979
2980 if (i == 1 || i == 3)
2981 r600_bytecode_src_toggle_neg(&alu.src[0]);
2982 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
2983
2984 if (i == lasti) {
2985 alu.last = 1;
2986 }
2987 r = r600_bytecode_add_alu(ctx->bc, &alu);
2988 if (r)
2989 return r;
2990 }
2991 return 0;
2992
2993 }
2994
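/* DFRACEXP splits a double into fraction and exponent.  Judging by the
 * MOVs below, the hw op leaves the fraction pair in temp channels z/w
 * and the integer exponent in channel y. */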
2995 static int tgsi_dfracexp(struct r600_shader_ctx *ctx)
2996 {
2997 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
2998 struct r600_bytecode_alu alu;
2999 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3000 int i, j, r;
3001 int firsti = write_mask == 0xc ? 2 : 0;
3002
3003 for (i = 0; i <= 3; i++) {
3004 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3005 alu.op = ctx->inst_info->op;
3006
3007 alu.dst.sel = ctx->temp_reg;
3008 alu.dst.chan = i;
3009 alu.dst.write = 1;
3010 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3011 r600_bytecode_src(&alu.src[j], &ctx->src[j], fp64_switch(i));
3012 }
3013
3014 if (i == 3)
3015 alu.last = 1;
3016
3017 r = r600_bytecode_add_alu(ctx->bc, &alu);
3018 if (r)
3019 return r;
3020 }
3021
3022 /* MOV first two channels to writemask dst0 */
3023 for (i = 0; i <= 1; i++) {
3024 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3025 alu.op = ALU_OP1_MOV;
3026 alu.src[0].chan = i + 2;
3027 alu.src[0].sel = ctx->temp_reg;
3028
3029 tgsi_dst(ctx, &inst->Dst[0], firsti + i, &alu.dst);
3030 alu.dst.write = (inst->Dst[0].Register.WriteMask >> (firsti + i)) & 1;
3031 alu.last = 1;
3032 r = r600_bytecode_add_alu(ctx->bc, &alu);
3033 if (r)
3034 return r;
3035 }
3036
3037 for (i = 0; i <= 3; i++) {
3038 if (inst->Dst[1].Register.WriteMask & (1 << i)) {
3039 /* MOV the third channel to writemask dst1 */
3040 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3041 alu.op = ALU_OP1_MOV;
3042 alu.src[0].chan = 1;
3043 alu.src[0].sel = ctx->temp_reg;
3044
3045 tgsi_dst(ctx, &inst->Dst[1], i, &alu.dst);
3046 alu.last = 1;
3047 r = r600_bytecode_add_alu(ctx->bc, &alu);
3048 if (r)
3049 return r;
3050 break;
3051 }
3052 }
3053 return 0;
3054 }
3055
3056
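/* I2D/U2D: convert each 32-bit integer to a float in a temp first, then
 * widen with FLT32_TO_FLT64 - each even dst channel reads the converted
 * value, each odd channel is padded with a zero literal. */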
3057 static int egcm_int_to_double(struct r600_shader_ctx *ctx)
3058 {
3059 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3060 struct r600_bytecode_alu alu;
3061 int i, r;
3062 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3063
3064 assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
3065 inst->Instruction.Opcode == TGSI_OPCODE_U2D);
3066
3067 for (i = 0; i <= (lasti+1)/2; i++) {
3068 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3069 alu.op = ctx->inst_info->op;
3070
3071 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
3072 alu.dst.sel = ctx->temp_reg;
3073 alu.dst.chan = i;
3074 alu.dst.write = 1;
3075 alu.last = 1;
3076
3077 r = r600_bytecode_add_alu(ctx->bc, &alu);
3078 if (r)
3079 return r;
3080 }
3081
3082 for (i = 0; i <= lasti; i++) {
3083 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3084 alu.op = ALU_OP1_FLT32_TO_FLT64;
3085
3086 alu.src[0].chan = i/2;
3087 if (i%2 == 0)
3088 alu.src[0].sel = ctx->temp_reg;
3089 else {
3090 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
3091 alu.src[0].value = 0x0;
3092 }
3093 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3094 alu.last = i == lasti;
3095
3096 r = r600_bytecode_add_alu(ctx->bc, &alu);
3097 if (r)
3098 return r;
3099 }
3100
3101 return 0;
3102 }
3103
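/* D2I/D2U: narrow with FLT64_TO_FLT32 first (only even channels are
 * written), then run the float-to-int conversion on those channels. */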
3104 static int egcm_double_to_int(struct r600_shader_ctx *ctx)
3105 {
3106 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3107 struct r600_bytecode_alu alu;
3108 int i, r;
3109 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3110
3111 assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
3112 inst->Instruction.Opcode == TGSI_OPCODE_D2U);
3113
3114 for (i = 0; i <= lasti; i++) {
3115 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3116 alu.op = ALU_OP1_FLT64_TO_FLT32;
3117
3118 r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i));
3119 alu.dst.chan = i;
3120 alu.dst.sel = ctx->temp_reg;
3121 alu.dst.write = i%2 == 0;
3122 alu.last = i == lasti;
3123
3124 r = r600_bytecode_add_alu(ctx->bc, &alu);
3125 if (r)
3126 return r;
3127 }
3128
3129 for (i = 0; i <= (lasti+1)/2; i++) {
3130 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3131 alu.op = ctx->inst_info->op;
3132
3133 alu.src[0].chan = i*2;
3134 alu.src[0].sel = ctx->temp_reg;
3135 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3136 alu.last = 1;
3137
3138 r = r600_bytecode_add_alu(ctx->bc, &alu);
3139 if (r)
3140 return r;
3141 }
3142
3143 return 0;
3144 }
3145
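/* Cayman double-precision transcendentals: the operand is fed as a
 * (high, low) word pair, the result pair appears in x/y of the temp and
 * is then fanned out to every written channel pair of the destination. */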
3146 static int cayman_emit_double_instr(struct r600_shader_ctx *ctx)
3147 {
3148 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3149 int i, r;
3150 struct r600_bytecode_alu alu;
3151 int last_slot = 3;
3152 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3153 int t1 = ctx->temp_reg;
3154
3155 /* these have to write the result to the X/Y channel pair */
3156 for (i = 0 ; i < last_slot; i++) {
3157 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3158 alu.op = ctx->inst_info->op;
3159
3160 /* there should only be one src reg */
3161 assert(inst->Instruction.NumSrcRegs == 1);
3162
3163 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
3164 r600_bytecode_src(&alu.src[1], &ctx->src[0], 0);
3165
3166 /* RSQ should take the absolute value of src */
3167 if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ ||
3168 ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT) {
3169 r600_bytecode_src_set_abs(&alu.src[1]);
3170 }
3171 alu.dst.sel = t1;
3172 alu.dst.chan = i;
3173 alu.dst.write = (i == 0 || i == 1);
3174
3175 if (ctx->bc->chip_class != CAYMAN || i == last_slot - 1)
3176 alu.last = 1;
3177 r = r600_bytecode_add_alu(ctx->bc, &alu);
3178 if (r)
3179 return r;
3180 }
3181
3182 for (i = 0 ; i <= lasti; i++) {
3183 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3184 continue;
3185 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3186 alu.op = ALU_OP1_MOV;
3187 alu.src[0].sel = t1;
3188 alu.src[0].chan = (i == 0 || i == 2) ? 0 : 1;
3189 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3190 alu.dst.write = 1;
3191 if (i == lasti)
3192 alu.last = 1;
3193 r = r600_bytecode_add_alu(ctx->bc, &alu);
3194 if (r)
3195 return r;
3196 }
3197 return 0;
3198 }
3199
3200 static int cayman_emit_float_instr(struct r600_shader_ctx *ctx)
3201 {
3202 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3203 int i, j, r;
3204 struct r600_bytecode_alu alu;
3205 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3206
3207 for (i = 0 ; i < last_slot; i++) {
3208 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3209 alu.op = ctx->inst_info->op;
3210 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3211 r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
3212
3213 /* RSQ should take the absolute value of src */
3214 if (inst->Instruction.Opcode == TGSI_OPCODE_RSQ) {
3215 r600_bytecode_src_set_abs(&alu.src[j]);
3216 }
3217 }
3218 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3219 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3220
3221 if (i == last_slot - 1)
3222 alu.last = 1;
3223 r = r600_bytecode_add_alu(ctx->bc, &alu);
3224 if (r)
3225 return r;
3226 }
3227 return 0;
3228 }
3229
3230 static int cayman_mul_int_instr(struct r600_shader_ctx *ctx)
3231 {
3232 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3233 int i, j, k, r;
3234 struct r600_bytecode_alu alu;
3235 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3236 int t1 = ctx->temp_reg;
3237
3238 for (k = 0; k <= lasti; k++) {
3239 if (!(inst->Dst[0].Register.WriteMask & (1 << k)))
3240 continue;
3241
3242 for (i = 0 ; i < 4; i++) {
3243 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3244 alu.op = ctx->inst_info->op;
3245 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3246 r600_bytecode_src(&alu.src[j], &ctx->src[j], k);
3247 }
3248 alu.dst.sel = t1;
3249 alu.dst.chan = i;
3250 alu.dst.write = (i == k);
3251 if (i == 3)
3252 alu.last = 1;
3253 r = r600_bytecode_add_alu(ctx->bc, &alu);
3254 if (r)
3255 return r;
3256 }
3257 }
3258
3259 for (i = 0 ; i <= lasti; i++) {
3260 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3261 continue;
3262 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3263 alu.op = ALU_OP1_MOV;
3264 alu.src[0].sel = t1;
3265 alu.src[0].chan = i;
3266 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3267 alu.dst.write = 1;
3268 if (i == lasti)
3269 alu.last = 1;
3270 r = r600_bytecode_add_alu(ctx->bc, &alu);
3271 if (r)
3272 return r;
3273 }
3274
3275 return 0;
3276 }
3277
3278
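/* Cayman double multiply: for each written channel pair the high word of
 * the sources feeds slots x/y/z and the low word feeds slot w
 * (src chan k*2+1 vs k*2); the resulting pair is copied out per mask. */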
3279 static int cayman_mul_double_instr(struct r600_shader_ctx *ctx)
3280 {
3281 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3282 int i, j, k, r;
3283 struct r600_bytecode_alu alu;
3284 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3285 int t1 = ctx->temp_reg;
3286
3287 for (k = 0; k < 2; k++) {
3288 if (!(inst->Dst[0].Register.WriteMask & (0x3 << (k * 2))))
3289 continue;
3290
3291 for (i = 0; i < 4; i++) {
3292 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3293 alu.op = ctx->inst_info->op;
3294 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
3295 r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1));
3296 }
3297 alu.dst.sel = t1;
3298 alu.dst.chan = i;
3299 alu.dst.write = 1;
3300 if (i == 3)
3301 alu.last = 1;
3302 r = r600_bytecode_add_alu(ctx->bc, &alu);
3303 if (r)
3304 return r;
3305 }
3306 }
3307
3308 for (i = 0; i <= lasti; i++) {
3309 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3310 continue;
3311 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3312 alu.op = ALU_OP1_MOV;
3313 alu.src[0].sel = t1;
3314 alu.src[0].chan = i;
3315 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3316 alu.dst.write = 1;
3317 if (i == lasti)
3318 alu.last = 1;
3319 r = r600_bytecode_add_alu(ctx->bc, &alu);
3320 if (r)
3321 return r;
3322 }
3323
3324 return 0;
3325 }
3326
3327 /*
3328 * r600 - trunc to -PI..PI range
3329 * r700 - normalize by dividing by 2PI
3330 * see fdo bug 27901
3331 */
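/* Worked example for the reduction below, with src = 3*PI:
 *   tmp = fract(3*PI * 1/(2*PI) + 0.5) = fract(2.0) = 0.0
 *   r600:  0.0 * 2*PI + (-PI) = -PI    (angle in [-PI, PI))
 *   r700+: 0.0 * 1   + (-0.5) = -0.5   (revolutions in [-0.5, 0.5))
 * either way SIN/COS receive an argument in their supported range. */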
3332 static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
3333 {
3334 static float half_inv_pi = 1.0 / (3.1415926535 * 2);
3335 static float double_pi = 3.1415926535 * 2;
3336 static float neg_pi = -3.1415926535;
3337
3338 int r;
3339 struct r600_bytecode_alu alu;
3340
3341 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3342 alu.op = ALU_OP3_MULADD;
3343 alu.is_op3 = 1;
3344
3345 alu.dst.chan = 0;
3346 alu.dst.sel = ctx->temp_reg;
3347 alu.dst.write = 1;
3348
3349 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3350
3351 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3352 alu.src[1].chan = 0;
3353 alu.src[1].value = *(uint32_t *)&half_inv_pi;
3354 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3355 alu.src[2].chan = 0;
3356 alu.last = 1;
3357 r = r600_bytecode_add_alu(ctx->bc, &alu);
3358 if (r)
3359 return r;
3360
3361 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3362 alu.op = ALU_OP1_FRACT;
3363
3364 alu.dst.chan = 0;
3365 alu.dst.sel = ctx->temp_reg;
3366 alu.dst.write = 1;
3367
3368 alu.src[0].sel = ctx->temp_reg;
3369 alu.src[0].chan = 0;
3370 alu.last = 1;
3371 r = r600_bytecode_add_alu(ctx->bc, &alu);
3372 if (r)
3373 return r;
3374
3375 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3376 alu.op = ALU_OP3_MULADD;
3377 alu.is_op3 = 1;
3378
3379 alu.dst.chan = 0;
3380 alu.dst.sel = ctx->temp_reg;
3381 alu.dst.write = 1;
3382
3383 alu.src[0].sel = ctx->temp_reg;
3384 alu.src[0].chan = 0;
3385
3386 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
3387 alu.src[1].chan = 0;
3388 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
3389 alu.src[2].chan = 0;
3390
3391 if (ctx->bc->chip_class == R600) {
3392 alu.src[1].value = *(uint32_t *)&double_pi;
3393 alu.src[2].value = *(uint32_t *)&neg_pi;
3394 } else {
3395 alu.src[1].sel = V_SQ_ALU_SRC_1;
3396 alu.src[2].sel = V_SQ_ALU_SRC_0_5;
3397 alu.src[2].neg = 1;
3398 }
3399
3400 alu.last = 1;
3401 r = r600_bytecode_add_alu(ctx->bc, &alu);
3402 if (r)
3403 return r;
3404 return 0;
3405 }
3406
3407 static int cayman_trig(struct r600_shader_ctx *ctx)
3408 {
3409 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3410 struct r600_bytecode_alu alu;
3411 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3412 int i, r;
3413
3414 r = tgsi_setup_trig(ctx);
3415 if (r)
3416 return r;
3417
3418
3419 for (i = 0; i < last_slot; i++) {
3420 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3421 alu.op = ctx->inst_info->op;
3422 alu.dst.chan = i;
3423
3424 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3425 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3426
3427 alu.src[0].sel = ctx->temp_reg;
3428 alu.src[0].chan = 0;
3429 if (i == last_slot - 1)
3430 alu.last = 1;
3431 r = r600_bytecode_add_alu(ctx->bc, &alu);
3432 if (r)
3433 return r;
3434 }
3435 return 0;
3436 }
3437
3438 static int tgsi_trig(struct r600_shader_ctx *ctx)
3439 {
3440 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3441 struct r600_bytecode_alu alu;
3442 int i, r;
3443 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
3444
3445 r = tgsi_setup_trig(ctx);
3446 if (r)
3447 return r;
3448
3449 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3450 alu.op = ctx->inst_info->op;
3451 alu.dst.chan = 0;
3452 alu.dst.sel = ctx->temp_reg;
3453 alu.dst.write = 1;
3454
3455 alu.src[0].sel = ctx->temp_reg;
3456 alu.src[0].chan = 0;
3457 alu.last = 1;
3458 r = r600_bytecode_add_alu(ctx->bc, &alu);
3459 if (r)
3460 return r;
3461
3462 /* replicate result */
3463 for (i = 0; i < lasti + 1; i++) {
3464 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
3465 continue;
3466
3467 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3468 alu.op = ALU_OP1_MOV;
3469
3470 alu.src[0].sel = ctx->temp_reg;
3471 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3472 if (i == lasti)
3473 alu.last = 1;
3474 r = r600_bytecode_add_alu(ctx->bc, &alu);
3475 if (r)
3476 return r;
3477 }
3478 return 0;
3479 }
3480
3481 static int tgsi_scs(struct r600_shader_ctx *ctx)
3482 {
3483 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3484 struct r600_bytecode_alu alu;
3485 int i, r;
3486
3487 /* We'll only need the trig stuff if we are going to write to the
3488 * X or Y components of the destination vector.
3489 */
3490 if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
3491 r = tgsi_setup_trig(ctx);
3492 if (r)
3493 return r;
3494 }
3495
3496 /* dst.x = COS */
3497 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3498 if (ctx->bc->chip_class == CAYMAN) {
3499 for (i = 0 ; i < 3; i++) {
3500 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3501 alu.op = ALU_OP1_COS;
3502 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3503
3504 if (i == 0)
3505 alu.dst.write = 1;
3506 else
3507 alu.dst.write = 0;
3508 alu.src[0].sel = ctx->temp_reg;
3509 alu.src[0].chan = 0;
3510 if (i == 2)
3511 alu.last = 1;
3512 r = r600_bytecode_add_alu(ctx->bc, &alu);
3513 if (r)
3514 return r;
3515 }
3516 } else {
3517 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3518 alu.op = ALU_OP1_COS;
3519 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3520
3521 alu.src[0].sel = ctx->temp_reg;
3522 alu.src[0].chan = 0;
3523 alu.last = 1;
3524 r = r600_bytecode_add_alu(ctx->bc, &alu);
3525 if (r)
3526 return r;
3527 }
3528 }
3529
3530 /* dst.y = SIN */
3531 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3532 if (ctx->bc->chip_class == CAYMAN) {
3533 for (i = 0 ; i < 3; i++) {
3534 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3535 alu.op = ALU_OP1_SIN;
3536 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3537 if (i == 1)
3538 alu.dst.write = 1;
3539 else
3540 alu.dst.write = 0;
3541 alu.src[0].sel = ctx->temp_reg;
3542 alu.src[0].chan = 0;
3543 if (i == 2)
3544 alu.last = 1;
3545 r = r600_bytecode_add_alu(ctx->bc, &alu);
3546 if (r)
3547 return r;
3548 }
3549 } else {
3550 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3551 alu.op = ALU_OP1_SIN;
3552 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
3553
3554 alu.src[0].sel = ctx->temp_reg;
3555 alu.src[0].chan = 0;
3556 alu.last = 1;
3557 r = r600_bytecode_add_alu(ctx->bc, &alu);
3558 if (r)
3559 return r;
3560 }
3561 }
3562
3563 /* dst.z = 0.0; */
3564 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3565 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3566
3567 alu.op = ALU_OP1_MOV;
3568
3569 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
3570
3571 alu.src[0].sel = V_SQ_ALU_SRC_0;
3572 alu.src[0].chan = 0;
3573
3574 alu.last = 1;
3575
3576 r = r600_bytecode_add_alu(ctx->bc, &alu);
3577 if (r)
3578 return r;
3579 }
3580
3581 /* dst.w = 1.0; */
3582 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3583 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3584
3585 alu.op = ALU_OP1_MOV;
3586
3587 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
3588
3589 alu.src[0].sel = V_SQ_ALU_SRC_1;
3590 alu.src[0].chan = 0;
3591
3592 alu.last = 1;
3593
3594 r = r600_bytecode_add_alu(ctx->bc, &alu);
3595 if (r)
3596 return r;
3597 }
3598
3599 return 0;
3600 }
3601
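/* KILL compares 0.0 against a constant -1.0 so the pixel always dies;
 * KILL_IF compares 0.0 against the source, killing where src < 0. */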
3602 static int tgsi_kill(struct r600_shader_ctx *ctx)
3603 {
3604 const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3605 struct r600_bytecode_alu alu;
3606 int i, r;
3607
3608 for (i = 0; i < 4; i++) {
3609 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3610 alu.op = ctx->inst_info->op;
3611
3612 alu.dst.chan = i;
3613
3614 alu.src[0].sel = V_SQ_ALU_SRC_0;
3615
3616 if (inst->Instruction.Opcode == TGSI_OPCODE_KILL) {
3617 alu.src[1].sel = V_SQ_ALU_SRC_1;
3618 alu.src[1].neg = 1;
3619 } else {
3620 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
3621 }
3622 if (i == 3) {
3623 alu.last = 1;
3624 }
3625 r = r600_bytecode_add_alu(ctx->bc, &alu);
3626 if (r)
3627 return r;
3628 }
3629
3630 /* kill must be last in ALU */
3631 ctx->bc->force_add_cf = 1;
3632 ctx->shader->uses_kill = TRUE;
3633 return 0;
3634 }
3635
3636 static int tgsi_lit(struct r600_shader_ctx *ctx)
3637 {
3638 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3639 struct r600_bytecode_alu alu;
3640 int r;
3641
3642 /* tmp.x = max(src.y, 0.0) */
3643 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3644 alu.op = ALU_OP2_MAX;
3645 r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
3646 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
3647 alu.src[1].chan = 1;
3648
3649 alu.dst.sel = ctx->temp_reg;
3650 alu.dst.chan = 0;
3651 alu.dst.write = 1;
3652
3653 alu.last = 1;
3654 r = r600_bytecode_add_alu(ctx->bc, &alu);
3655 if (r)
3656 return r;
3657
3658 if (inst->Dst[0].Register.WriteMask & (1 << 2))
3659 {
3660 int chan;
3661 int sel;
3662 int i;
3663
3664 if (ctx->bc->chip_class == CAYMAN) {
3665 for (i = 0; i < 3; i++) {
3666 /* tmp.z = log(tmp.x) */
3667 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3668 alu.op = ALU_OP1_LOG_CLAMPED;
3669 alu.src[0].sel = ctx->temp_reg;
3670 alu.src[0].chan = 0;
3671 alu.dst.sel = ctx->temp_reg;
3672 alu.dst.chan = i;
3673 if (i == 2) {
3674 alu.dst.write = 1;
3675 alu.last = 1;
3676 } else
3677 alu.dst.write = 0;
3678
3679 r = r600_bytecode_add_alu(ctx->bc, &alu);
3680 if (r)
3681 return r;
3682 }
3683 } else {
3684 /* tmp.z = log(tmp.x) */
3685 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3686 alu.op = ALU_OP1_LOG_CLAMPED;
3687 alu.src[0].sel = ctx->temp_reg;
3688 alu.src[0].chan = 0;
3689 alu.dst.sel = ctx->temp_reg;
3690 alu.dst.chan = 2;
3691 alu.dst.write = 1;
3692 alu.last = 1;
3693 r = r600_bytecode_add_alu(ctx->bc, &alu);
3694 if (r)
3695 return r;
3696 }
3697
3698 chan = alu.dst.chan;
3699 sel = alu.dst.sel;
3700
3701 /* tmp.x = MUL_LIT(tmp.z, src.w, src.x) */
3702 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3703 alu.op = ALU_OP3_MUL_LIT;
3704 alu.src[0].sel = sel;
3705 alu.src[0].chan = chan;
3706 r600_bytecode_src(&alu.src[1], &ctx->src[0], 3);
3707 r600_bytecode_src(&alu.src[2], &ctx->src[0], 0);
3708 alu.dst.sel = ctx->temp_reg;
3709 alu.dst.chan = 0;
3710 alu.dst.write = 1;
3711 alu.is_op3 = 1;
3712 alu.last = 1;
3713 r = r600_bytecode_add_alu(ctx->bc, &alu);
3714 if (r)
3715 return r;
3716
3717 if (ctx->bc->chip_class == CAYMAN) {
3718 for (i = 0; i < 3; i++) {
3719 /* dst.z = exp(tmp.x) */
3720 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3721 alu.op = ALU_OP1_EXP_IEEE;
3722 alu.src[0].sel = ctx->temp_reg;
3723 alu.src[0].chan = 0;
3724 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3725 if (i == 2) {
3726 alu.dst.write = 1;
3727 alu.last = 1;
3728 } else
3729 alu.dst.write = 0;
3730 r = r600_bytecode_add_alu(ctx->bc, &alu);
3731 if (r)
3732 return r;
3733 }
3734 } else {
3735 /* dst.z = exp(tmp.x) */
3736 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3737 alu.op = ALU_OP1_EXP_IEEE;
3738 alu.src[0].sel = ctx->temp_reg;
3739 alu.src[0].chan = 0;
3740 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
3741 alu.last = 1;
3742 r = r600_bytecode_add_alu(ctx->bc, &alu);
3743 if (r)
3744 return r;
3745 }
3746 }
3747
3748 /* dst.x = 1.0 */
3749 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3750 alu.op = ALU_OP1_MOV;
3751 alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/
3752 alu.src[0].chan = 0;
3753 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
3754 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
3755 r = r600_bytecode_add_alu(ctx->bc, &alu);
3756 if (r)
3757 return r;
3758
3759 /* dst.y = max(src.x, 0.0) */
3760 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3761 alu.op = ALU_OP2_MAX;
3762 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3763 alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/
3764 alu.src[1].chan = 0;
3765 tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
3766 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
3767 r = r600_bytecode_add_alu(ctx->bc, &alu);
3768 if (r)
3769 return r;
3770
3771 /* dst.w = 1.0 */
3772 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3773 alu.op = ALU_OP1_MOV;
3774 alu.src[0].sel = V_SQ_ALU_SRC_1;
3775 alu.src[0].chan = 0;
3776 tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
3777 alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
3778 alu.last = 1;
3779 r = r600_bytecode_add_alu(ctx->bc, &alu);
3780 if (r)
3781 return r;
3782
3783 return 0;
3784 }
3785
3786 static int tgsi_rsq(struct r600_shader_ctx *ctx)
3787 {
3788 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3789 struct r600_bytecode_alu alu;
3790 int i, r;
3791
3792 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3793
3794 /* XXX:
3795 * For state trackers other than OpenGL, we'll want to use
3796 * _RECIPSQRT_IEEE instead.
3797 */
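/* RECIPSQRT_CLAMPED clamps the result to +/-MAX_FLOAT instead of
 * returning infinity, and the source takes an abs modifier below, so
 * e.g. RSQ(-4.0) behaves like 1/sqrt(4.0) = 0.5, matching what older
 * GL state trackers expect.
 */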
3798 alu.op = ALU_OP1_RECIPSQRT_CLAMPED;
3799
3800 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3801 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3802 r600_bytecode_src_set_abs(&alu.src[i]);
3803 }
3804 alu.dst.sel = ctx->temp_reg;
3805 alu.dst.write = 1;
3806 alu.last = 1;
3807 r = r600_bytecode_add_alu(ctx->bc, &alu);
3808 if (r)
3809 return r;
3810 /* replicate result */
3811 return tgsi_helper_tempx_replicate(ctx);
3812 }
3813
3814 static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx)
3815 {
3816 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3817 struct r600_bytecode_alu alu;
3818 int i, r;
3819
3820 for (i = 0; i < 4; i++) {
3821 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3822 alu.src[0].sel = ctx->temp_reg;
3823 alu.op = ALU_OP1_MOV;
3824 alu.dst.chan = i;
3825 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3826 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3827 if (i == 3)
3828 alu.last = 1;
3829 r = r600_bytecode_add_alu(ctx->bc, &alu);
3830 if (r)
3831 return r;
3832 }
3833 return 0;
3834 }
3835
3836 static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx)
3837 {
3838 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3839 struct r600_bytecode_alu alu;
3840 int i, r;
3841
3842 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3843 alu.op = ctx->inst_info->op;
3844 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
3845 r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
3846 }
3847 alu.dst.sel = ctx->temp_reg;
3848 alu.dst.write = 1;
3849 alu.last = 1;
3850 r = r600_bytecode_add_alu(ctx->bc, &alu);
3851 if (r)
3852 return r;
3853 /* replicate result */
3854 return tgsi_helper_tempx_replicate(ctx);
3855 }
3856
3857 static int cayman_pow(struct r600_shader_ctx *ctx)
3858 {
3859 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3860 int i, r;
3861 struct r600_bytecode_alu alu;
3862 int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3;
3863
3864 for (i = 0; i < 3; i++) {
3865 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3866 alu.op = ALU_OP1_LOG_IEEE;
3867 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3868 alu.dst.sel = ctx->temp_reg;
3869 alu.dst.chan = i;
3870 alu.dst.write = 1;
3871 if (i == 2)
3872 alu.last = 1;
3873 r = r600_bytecode_add_alu(ctx->bc, &alu);
3874 if (r)
3875 return r;
3876 }
3877
3878 /* b * LOG2(a) */
3879 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3880 alu.op = ALU_OP2_MUL;
3881 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3882 alu.src[1].sel = ctx->temp_reg;
3883 alu.dst.sel = ctx->temp_reg;
3884 alu.dst.write = 1;
3885 alu.last = 1;
3886 r = r600_bytecode_add_alu(ctx->bc, &alu);
3887 if (r)
3888 return r;
3889
3890 for (i = 0; i < last_slot; i++) {
3891 /* POW(a,b) = EXP2(b * LOG2(a)) */
3892 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3893 alu.op = ALU_OP1_EXP_IEEE;
3894 alu.src[0].sel = ctx->temp_reg;
3895
3896 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
3897 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
3898 if (i == last_slot - 1)
3899 alu.last = 1;
3900 r = r600_bytecode_add_alu(ctx->bc, &alu);
3901 if (r)
3902 return r;
3903 }
3904 return 0;
3905 }
3906
3907 static int tgsi_pow(struct r600_shader_ctx *ctx)
3908 {
3909 struct r600_bytecode_alu alu;
3910 int r;
3911
3912 /* LOG2(a) */
3913 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3914 alu.op = ALU_OP1_LOG_IEEE;
3915 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
3916 alu.dst.sel = ctx->temp_reg;
3917 alu.dst.write = 1;
3918 alu.last = 1;
3919 r = r600_bytecode_add_alu(ctx->bc, &alu);
3920 if (r)
3921 return r;
3922 /* b * LOG2(a) */
3923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3924 alu.op = ALU_OP2_MUL;
3925 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
3926 alu.src[1].sel = ctx->temp_reg;
3927 alu.dst.sel = ctx->temp_reg;
3928 alu.dst.write = 1;
3929 alu.last = 1;
3930 r = r600_bytecode_add_alu(ctx->bc, &alu);
3931 if (r)
3932 return r;
3933 /* POW(a,b) = EXP2(b * LOG2(a)) */
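/* e.g. pow(2.0, 10.0) = exp2(10.0 * log2(2.0)) = exp2(10.0) = 1024.0;
 * precision is limited by the transcendental LOG_IEEE/EXP_IEEE units,
 * so exact-integer results like this may be off by a few ULP.
 */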
3934 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
3935 alu.op = ALU_OP1_EXP_IEEE;
3936 alu.src[0].sel = ctx->temp_reg;
3937 alu.dst.sel = ctx->temp_reg;
3938 alu.dst.write = 1;
3939 alu.last = 1;
3940 r = r600_bytecode_add_alu(ctx->bc, &alu);
3941 if (r)
3942 return r;
3943 return tgsi_helper_tempx_replicate(ctx);
3944 }
3945
3946 static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
3947 {
3948 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
3949 struct r600_bytecode_alu alu;
3950 int i, r, j;
3951 unsigned write_mask = inst->Dst[0].Register.WriteMask;
3952 int tmp0 = ctx->temp_reg;
3953 int tmp1 = r600_get_temp(ctx);
3954 int tmp2 = r600_get_temp(ctx);
3955 int tmp3 = r600_get_temp(ctx);
3956 /* Unsigned path:
3957 *
3958 * we need to represent src1 as src2*q + r, where q is the quotient and r is the remainder
3959 *
3960 * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error
3961 * 2. tmp0.z = lo (tmp0.x * src2)
3962 * 3. tmp0.w = -tmp0.z
3963 * 4. tmp0.y = hi (tmp0.x * src2)
3964 * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2))
3965 * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error
3966 * 7. tmp1.x = tmp0.x - tmp0.w
3967 * 8. tmp1.y = tmp0.x + tmp0.w
3968 * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x)
3969 * 10. tmp0.z = hi(tmp0.x * src1) = q
3970 * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r
3971 *
3972 * 12. tmp0.w = src1 - tmp0.y = r
3973 * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison)
3974 * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison)
3975 *
3976 * if DIV
3977 *
3978 * 15. tmp1.z = tmp0.z + 1 = q + 1
3979 * 16. tmp1.w = tmp0.z - 1 = q - 1
3980 *
3981 * else MOD
3982 *
3983 * 15. tmp1.z = tmp0.w - src2 = r - src2
3984 * 16. tmp1.w = tmp0.w + src2 = r + src2
3985 *
3986 * endif
3987 *
3988 * 17. tmp1.x = tmp1.x & tmp1.y
3989 *
3990 * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z
3991 * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z
3992 *
3993 * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z
3994 * 20. dst = src2==0 ? MAX_UINT : tmp0.z
3995 *
3996 * Signed path:
3997 *
3998 * Same as unsigned, using abs values of the operands,
3999 * and fixing the sign of the result in the end.
4000 */
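/* Worked example (unsigned path, illustrative numbers): for
 * src1 = 100, src2 = 7 the reciprocal step gives
 * tmp0.x ~= 613566756 ~= 2^32/7, then q = hi(tmp0.x * 100) = 14 and
 * src2*q = 98, so r = 100 - 98 = 2. Since r < src2 and r >= 0, none
 * of the corrections in steps 15-19 fire, and DIV yields 14 while
 * MOD yields 2.
 */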
4001
4002 for (i = 0; i < 4; i++) {
4003 if (!(write_mask & (1<<i)))
4004 continue;
4005
4006 if (signed_op) {
4007
4008 /* tmp2.x = -src0 */
4009 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4010 alu.op = ALU_OP2_SUB_INT;
4011
4012 alu.dst.sel = tmp2;
4013 alu.dst.chan = 0;
4014 alu.dst.write = 1;
4015
4016 alu.src[0].sel = V_SQ_ALU_SRC_0;
4017
4018 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4019
4020 alu.last = 1;
4021 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4022 return r;
4023
4024 /* tmp2.y = -src1 */
4025 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4026 alu.op = ALU_OP2_SUB_INT;
4027
4028 alu.dst.sel = tmp2;
4029 alu.dst.chan = 1;
4030 alu.dst.write = 1;
4031
4032 alu.src[0].sel = V_SQ_ALU_SRC_0;
4033
4034 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4035
4036 alu.last = 1;
4037 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4038 return r;
4039
4040 /* tmp2.z sign bit is set if src0 and src1 signs are different */
4041 /* it will be the sign of the quotient */
4042 if (!mod) {
4043
4044 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4045 alu.op = ALU_OP2_XOR_INT;
4046
4047 alu.dst.sel = tmp2;
4048 alu.dst.chan = 2;
4049 alu.dst.write = 1;
4050
4051 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4052 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4053
4054 alu.last = 1;
4055 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4056 return r;
4057 }
4058
4059 /* tmp2.x = |src0| */
4060 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4061 alu.op = ALU_OP3_CNDGE_INT;
4062 alu.is_op3 = 1;
4063
4064 alu.dst.sel = tmp2;
4065 alu.dst.chan = 0;
4066 alu.dst.write = 1;
4067
4068 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4069 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4070 alu.src[2].sel = tmp2;
4071 alu.src[2].chan = 0;
4072
4073 alu.last = 1;
4074 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4075 return r;
4076
4077 /* tmp2.y = |src1| */
4078 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4079 alu.op = ALU_OP3_CNDGE_INT;
4080 alu.is_op3 = 1;
4081
4082 alu.dst.sel = tmp2;
4083 alu.dst.chan = 1;
4084 alu.dst.write = 1;
4085
4086 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4087 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4088 alu.src[2].sel = tmp2;
4089 alu.src[2].chan = 1;
4090
4091 alu.last = 1;
4092 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4093 return r;
4094
4095 }
4096
4097 /* 1. tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */
4098 if (ctx->bc->chip_class == CAYMAN) {
4099 /* tmp3.x = u2f(src2) */
4100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4101 alu.op = ALU_OP1_UINT_TO_FLT;
4102
4103 alu.dst.sel = tmp3;
4104 alu.dst.chan = 0;
4105 alu.dst.write = 1;
4106
4107 if (signed_op) {
4108 alu.src[0].sel = tmp2;
4109 alu.src[0].chan = 1;
4110 } else {
4111 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4112 }
4113
4114 alu.last = 1;
4115 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4116 return r;
4117
4118 /* tmp0.x = recip(tmp3.x) */
4119 for (j = 0 ; j < 3; j++) {
4120 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4121 alu.op = ALU_OP1_RECIP_IEEE;
4122
4123 alu.dst.sel = tmp0;
4124 alu.dst.chan = j;
4125 alu.dst.write = (j == 0);
4126
4127 alu.src[0].sel = tmp3;
4128 alu.src[0].chan = 0;
4129
4130 if (j == 2)
4131 alu.last = 1;
4132 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4133 return r;
4134 }
4135
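/* Cayman has no RECIP_UINT, so emulate it in float: tmp0.x holds
 * 1.0f / u2f(src2); multiply by 2^32 (0x4f800000 == 4294967296.0f)
 * and convert back, giving approximately 2^32/src2 as an integer.
 */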
4136 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4137 alu.op = ALU_OP2_MUL;
4138
4139 alu.src[0].sel = tmp0;
4140 alu.src[0].chan = 0;
4141
4142 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
4143 alu.src[1].value = 0x4f800000;
4144
4145 alu.dst.sel = tmp3;
4146 alu.dst.write = 1;
4147 alu.last = 1;
4148 r = r600_bytecode_add_alu(ctx->bc, &alu);
4149 if (r)
4150 return r;
4151
4152 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4153 alu.op = ALU_OP1_FLT_TO_UINT;
4154
4155 alu.dst.sel = tmp0;
4156 alu.dst.chan = 0;
4157 alu.dst.write = 1;
4158
4159 alu.src[0].sel = tmp3;
4160 alu.src[0].chan = 0;
4161
4162 alu.last = 1;
4163 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4164 return r;
4165
4166 } else {
4167 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4168 alu.op = ALU_OP1_RECIP_UINT;
4169
4170 alu.dst.sel = tmp0;
4171 alu.dst.chan = 0;
4172 alu.dst.write = 1;
4173
4174 if (signed_op) {
4175 alu.src[0].sel = tmp2;
4176 alu.src[0].chan = 1;
4177 } else {
4178 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4179 }
4180
4181 alu.last = 1;
4182 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4183 return r;
4184 }
4185
4186 /* 2. tmp0.z = lo (tmp0.x * src2) */
4187 if (ctx->bc->chip_class == CAYMAN) {
4188 for (j = 0 ; j < 4; j++) {
4189 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4190 alu.op = ALU_OP2_MULLO_UINT;
4191
4192 alu.dst.sel = tmp0;
4193 alu.dst.chan = j;
4194 alu.dst.write = (j == 2);
4195
4196 alu.src[0].sel = tmp0;
4197 alu.src[0].chan = 0;
4198 if (signed_op) {
4199 alu.src[1].sel = tmp2;
4200 alu.src[1].chan = 1;
4201 } else {
4202 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4203 }
4204
4205 alu.last = (j == 3);
4206 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4207 return r;
4208 }
4209 } else {
4210 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4211 alu.op = ALU_OP2_MULLO_UINT;
4212
4213 alu.dst.sel = tmp0;
4214 alu.dst.chan = 2;
4215 alu.dst.write = 1;
4216
4217 alu.src[0].sel = tmp0;
4218 alu.src[0].chan = 0;
4219 if (signed_op) {
4220 alu.src[1].sel = tmp2;
4221 alu.src[1].chan = 1;
4222 } else {
4223 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4224 }
4225
4226 alu.last = 1;
4227 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4228 return r;
4229 }
4230
4231 /* 3. tmp0.w = -tmp0.z */
4232 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4233 alu.op = ALU_OP2_SUB_INT;
4234
4235 alu.dst.sel = tmp0;
4236 alu.dst.chan = 3;
4237 alu.dst.write = 1;
4238
4239 alu.src[0].sel = V_SQ_ALU_SRC_0;
4240 alu.src[1].sel = tmp0;
4241 alu.src[1].chan = 2;
4242
4243 alu.last = 1;
4244 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4245 return r;
4246
4247 /* 4. tmp0.y = hi (tmp0.x * src2) */
4248 if (ctx->bc->chip_class == CAYMAN) {
4249 for (j = 0 ; j < 4; j++) {
4250 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4251 alu.op = ALU_OP2_MULHI_UINT;
4252
4253 alu.dst.sel = tmp0;
4254 alu.dst.chan = j;
4255 alu.dst.write = (j == 1);
4256
4257 alu.src[0].sel = tmp0;
4258 alu.src[0].chan = 0;
4259
4260 if (signed_op) {
4261 alu.src[1].sel = tmp2;
4262 alu.src[1].chan = 1;
4263 } else {
4264 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4265 }
4266 alu.last = (j == 3);
4267 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4268 return r;
4269 }
4270 } else {
4271 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4272 alu.op = ALU_OP2_MULHI_UINT;
4273
4274 alu.dst.sel = tmp0;
4275 alu.dst.chan = 1;
4276 alu.dst.write = 1;
4277
4278 alu.src[0].sel = tmp0;
4279 alu.src[0].chan = 0;
4280
4281 if (signed_op) {
4282 alu.src[1].sel = tmp2;
4283 alu.src[1].chan = 1;
4284 } else {
4285 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4286 }
4287
4288 alu.last = 1;
4289 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4290 return r;
4291 }
4292
4293 /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) */
4294 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4295 alu.op = ALU_OP3_CNDE_INT;
4296 alu.is_op3 = 1;
4297
4298 alu.dst.sel = tmp0;
4299 alu.dst.chan = 2;
4300 alu.dst.write = 1;
4301
4302 alu.src[0].sel = tmp0;
4303 alu.src[0].chan = 1;
4304 alu.src[1].sel = tmp0;
4305 alu.src[1].chan = 3;
4306 alu.src[2].sel = tmp0;
4307 alu.src[2].chan = 2;
4308
4309 alu.last = 1;
4310 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4311 return r;
4312
4313 /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
4314 if (ctx->bc->chip_class == CAYMAN) {
4315 for (j = 0 ; j < 4; j++) {
4316 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4317 alu.op = ALU_OP2_MULHI_UINT;
4318
4319 alu.dst.sel = tmp0;
4320 alu.dst.chan = j;
4321 alu.dst.write = (j == 3);
4322
4323 alu.src[0].sel = tmp0;
4324 alu.src[0].chan = 2;
4325
4326 alu.src[1].sel = tmp0;
4327 alu.src[1].chan = 0;
4328
4329 alu.last = (j == 3);
4330 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4331 return r;
4332 }
4333 } else {
4334 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4335 alu.op = ALU_OP2_MULHI_UINT;
4336
4337 alu.dst.sel = tmp0;
4338 alu.dst.chan = 3;
4339 alu.dst.write = 1;
4340
4341 alu.src[0].sel = tmp0;
4342 alu.src[0].chan = 2;
4343
4344 alu.src[1].sel = tmp0;
4345 alu.src[1].chan = 0;
4346
4347 alu.last = 1;
4348 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4349 return r;
4350 }
4351
4352 /* 7. tmp1.x = tmp0.x - tmp0.w */
4353 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4354 alu.op = ALU_OP2_SUB_INT;
4355
4356 alu.dst.sel = tmp1;
4357 alu.dst.chan = 0;
4358 alu.dst.write = 1;
4359
4360 alu.src[0].sel = tmp0;
4361 alu.src[0].chan = 0;
4362 alu.src[1].sel = tmp0;
4363 alu.src[1].chan = 3;
4364
4365 alu.last = 1;
4366 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4367 return r;
4368
4369 /* 8. tmp1.y = tmp0.x + tmp0.w */
4370 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4371 alu.op = ALU_OP2_ADD_INT;
4372
4373 alu.dst.sel = tmp1;
4374 alu.dst.chan = 1;
4375 alu.dst.write = 1;
4376
4377 alu.src[0].sel = tmp0;
4378 alu.src[0].chan = 0;
4379 alu.src[1].sel = tmp0;
4380 alu.src[1].chan = 3;
4381
4382 alu.last = 1;
4383 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4384 return r;
4385
4386 /* 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) */
4387 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4388 alu.op = ALU_OP3_CNDE_INT;
4389 alu.is_op3 = 1;
4390
4391 alu.dst.sel = tmp0;
4392 alu.dst.chan = 0;
4393 alu.dst.write = 1;
4394
4395 alu.src[0].sel = tmp0;
4396 alu.src[0].chan = 1;
4397 alu.src[1].sel = tmp1;
4398 alu.src[1].chan = 1;
4399 alu.src[2].sel = tmp1;
4400 alu.src[2].chan = 0;
4401
4402 alu.last = 1;
4403 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4404 return r;
4405
4406 /* 10. tmp0.z = hi(tmp0.x * src1) = q */
4407 if (ctx->bc->chip_class == CAYMAN) {
4408 for (j = 0 ; j < 4; j++) {
4409 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4410 alu.op = ALU_OP2_MULHI_UINT;
4411
4412 alu.dst.sel = tmp0;
4413 alu.dst.chan = j;
4414 alu.dst.write = (j == 2);
4415
4416 alu.src[0].sel = tmp0;
4417 alu.src[0].chan = 0;
4418
4419 if (signed_op) {
4420 alu.src[1].sel = tmp2;
4421 alu.src[1].chan = 0;
4422 } else {
4423 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4424 }
4425
4426 alu.last = (j == 3);
4427 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4428 return r;
4429 }
4430 } else {
4431 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4432 alu.op = ALU_OP2_MULHI_UINT;
4433
4434 alu.dst.sel = tmp0;
4435 alu.dst.chan = 2;
4436 alu.dst.write = 1;
4437
4438 alu.src[0].sel = tmp0;
4439 alu.src[0].chan = 0;
4440
4441 if (signed_op) {
4442 alu.src[1].sel = tmp2;
4443 alu.src[1].chan = 0;
4444 } else {
4445 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4446 }
4447
4448 alu.last = 1;
4449 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4450 return r;
4451 }
4452
4453 /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
4454 if (ctx->bc->chip_class == CAYMAN) {
4455 for (j = 0 ; j < 4; j++) {
4456 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4457 alu.op = ALU_OP2_MULLO_UINT;
4458
4459 alu.dst.sel = tmp0;
4460 alu.dst.chan = j;
4461 alu.dst.write = (j == 1);
4462
4463 if (signed_op) {
4464 alu.src[0].sel = tmp2;
4465 alu.src[0].chan = 1;
4466 } else {
4467 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4468 }
4469
4470 alu.src[1].sel = tmp0;
4471 alu.src[1].chan = 2;
4472
4473 alu.last = (j == 3);
4474 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4475 return r;
4476 }
4477 } else {
4478 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4479 alu.op = ALU_OP2_MULLO_UINT;
4480
4481 alu.dst.sel = tmp0;
4482 alu.dst.chan = 1;
4483 alu.dst.write = 1;
4484
4485 if (signed_op) {
4486 alu.src[0].sel = tmp2;
4487 alu.src[0].chan = 1;
4488 } else {
4489 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
4490 }
4491
4492 alu.src[1].sel = tmp0;
4493 alu.src[1].chan = 2;
4494
4495 alu.last = 1;
4496 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4497 return r;
4498 }
4499
4500 /* 12. tmp0.w = src1 - tmp0.y = r */
4501 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4502 alu.op = ALU_OP2_SUB_INT;
4503
4504 alu.dst.sel = tmp0;
4505 alu.dst.chan = 3;
4506 alu.dst.write = 1;
4507
4508 if (signed_op) {
4509 alu.src[0].sel = tmp2;
4510 alu.src[0].chan = 0;
4511 } else {
4512 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4513 }
4514
4515 alu.src[1].sel = tmp0;
4516 alu.src[1].chan = 1;
4517
4518 alu.last = 1;
4519 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4520 return r;
4521
4522 /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */
4523 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4524 alu.op = ALU_OP2_SETGE_UINT;
4525
4526 alu.dst.sel = tmp1;
4527 alu.dst.chan = 0;
4528 alu.dst.write = 1;
4529
4530 alu.src[0].sel = tmp0;
4531 alu.src[0].chan = 3;
4532 if (signed_op) {
4533 alu.src[1].sel = tmp2;
4534 alu.src[1].chan = 1;
4535 } else {
4536 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4537 }
4538
4539 alu.last = 1;
4540 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4541 return r;
4542
4543 /* 14. tmp1.y = src1 >= tmp0.y = r >= 0 */
4544 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4545 alu.op = ALU_OP2_SETGE_UINT;
4546
4547 alu.dst.sel = tmp1;
4548 alu.dst.chan = 1;
4549 alu.dst.write = 1;
4550
4551 if (signed_op) {
4552 alu.src[0].sel = tmp2;
4553 alu.src[0].chan = 0;
4554 } else {
4555 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4556 }
4557
4558 alu.src[1].sel = tmp0;
4559 alu.src[1].chan = 1;
4560
4561 alu.last = 1;
4562 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4563 return r;
4564
4565 if (mod) { /* UMOD */
4566
4567 /* 15. tmp1.z = tmp0.w - src2 = r - src2 */
4568 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4569 alu.op = ALU_OP2_SUB_INT;
4570
4571 alu.dst.sel = tmp1;
4572 alu.dst.chan = 2;
4573 alu.dst.write = 1;
4574
4575 alu.src[0].sel = tmp0;
4576 alu.src[0].chan = 3;
4577
4578 if (signed_op) {
4579 alu.src[1].sel = tmp2;
4580 alu.src[1].chan = 1;
4581 } else {
4582 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4583 }
4584
4585 alu.last = 1;
4586 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4587 return r;
4588
4589 /* 16. tmp1.w = tmp0.w + src2 = r + src2 */
4590 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4591 alu.op = ALU_OP2_ADD_INT;
4592
4593 alu.dst.sel = tmp1;
4594 alu.dst.chan = 3;
4595 alu.dst.write = 1;
4596
4597 alu.src[0].sel = tmp0;
4598 alu.src[0].chan = 3;
4599 if (signed_op) {
4600 alu.src[1].sel = tmp2;
4601 alu.src[1].chan = 1;
4602 } else {
4603 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
4604 }
4605
4606 alu.last = 1;
4607 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4608 return r;
4609
4610 } else { /* UDIV */
4611
4612 /* 15. tmp1.z = tmp0.z + 1 = q + 1 DIV */
4613 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4614 alu.op = ALU_OP2_ADD_INT;
4615
4616 alu.dst.sel = tmp1;
4617 alu.dst.chan = 2;
4618 alu.dst.write = 1;
4619
4620 alu.src[0].sel = tmp0;
4621 alu.src[0].chan = 2;
4622 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4623
4624 alu.last = 1;
4625 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4626 return r;
4627
4628 /* 16. tmp1.w = tmp0.z - 1 = q - 1 */
4629 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4630 alu.op = ALU_OP2_ADD_INT;
4631
4632 alu.dst.sel = tmp1;
4633 alu.dst.chan = 3;
4634 alu.dst.write = 1;
4635
4636 alu.src[0].sel = tmp0;
4637 alu.src[0].chan = 2;
4638 alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT;
4639
4640 alu.last = 1;
4641 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4642 return r;
4643
4644 }
4645
4646 /* 17. tmp1.x = tmp1.x & tmp1.y */
4647 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4648 alu.op = ALU_OP2_AND_INT;
4649
4650 alu.dst.sel = tmp1;
4651 alu.dst.chan = 0;
4652 alu.dst.write = 1;
4653
4654 alu.src[0].sel = tmp1;
4655 alu.src[0].chan = 0;
4656 alu.src[1].sel = tmp1;
4657 alu.src[1].chan = 1;
4658
4659 alu.last = 1;
4660 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4661 return r;
4662
4663 /* 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z DIV */
4664 /* 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z MOD */
4665 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4666 alu.op = ALU_OP3_CNDE_INT;
4667 alu.is_op3 = 1;
4668
4669 alu.dst.sel = tmp0;
4670 alu.dst.chan = 2;
4671 alu.dst.write = 1;
4672
4673 alu.src[0].sel = tmp1;
4674 alu.src[0].chan = 0;
4675 alu.src[1].sel = tmp0;
4676 alu.src[1].chan = mod ? 3 : 2;
4677 alu.src[2].sel = tmp1;
4678 alu.src[2].chan = 2;
4679
4680 alu.last = 1;
4681 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4682 return r;
4683
4684 /* 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z */
4685 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4686 alu.op = ALU_OP3_CNDE_INT;
4687 alu.is_op3 = 1;
4688
4689 if (signed_op) {
4690 alu.dst.sel = tmp0;
4691 alu.dst.chan = 2;
4692 alu.dst.write = 1;
4693 } else {
4694 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4695 }
4696
4697 alu.src[0].sel = tmp1;
4698 alu.src[0].chan = 1;
4699 alu.src[1].sel = tmp1;
4700 alu.src[1].chan = 3;
4701 alu.src[2].sel = tmp0;
4702 alu.src[2].chan = 2;
4703
4704 alu.last = 1;
4705 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4706 return r;
4707
4708 if (signed_op) {
4709
4710 /* fix the sign of the result */
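/* Truncated-division semantics, e.g. -7 / 2: the unsigned path on
 * |src0|, |src1| gives q = 3, r = 1; the quotient takes the sign of
 * src0 ^ src1 (tmp2.z), so IDIV returns -3, and the remainder takes
 * the sign of src0, so IMOD returns -1.
 */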
4711
4712 if (mod) {
4713
4714 /* tmp0.x = -tmp0.z */
4715 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4716 alu.op = ALU_OP2_SUB_INT;
4717
4718 alu.dst.sel = tmp0;
4719 alu.dst.chan = 0;
4720 alu.dst.write = 1;
4721
4722 alu.src[0].sel = V_SQ_ALU_SRC_0;
4723 alu.src[1].sel = tmp0;
4724 alu.src[1].chan = 2;
4725
4726 alu.last = 1;
4727 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4728 return r;
4729
4730 /* sign of the remainder is the same as the sign of src0 */
4731 /* tmp0.x = src0>=0 ? tmp0.z : tmp0.x */
4732 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4733 alu.op = ALU_OP3_CNDGE_INT;
4734 alu.is_op3 = 1;
4735
4736 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4737
4738 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4739 alu.src[1].sel = tmp0;
4740 alu.src[1].chan = 2;
4741 alu.src[2].sel = tmp0;
4742 alu.src[2].chan = 0;
4743
4744 alu.last = 1;
4745 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4746 return r;
4747
4748 } else {
4749
4750 /* tmp0.x = -tmp0.z */
4751 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4752 alu.op = ALU_OP2_SUB_INT;
4753
4754 alu.dst.sel = tmp0;
4755 alu.dst.chan = 0;
4756 alu.dst.write = 1;
4757
4758 alu.src[0].sel = V_SQ_ALU_SRC_0;
4759 alu.src[1].sel = tmp0;
4760 alu.src[1].chan = 2;
4761
4762 alu.last = 1;
4763 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4764 return r;
4765
4766 /* fix the quotient sign (same as the sign of src0*src1) */
4767 /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */
4768 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4769 alu.op = ALU_OP3_CNDGE_INT;
4770 alu.is_op3 = 1;
4771
4772 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4773
4774 alu.src[0].sel = tmp2;
4775 alu.src[0].chan = 2;
4776 alu.src[1].sel = tmp0;
4777 alu.src[1].chan = 2;
4778 alu.src[2].sel = tmp0;
4779 alu.src[2].chan = 0;
4780
4781 alu.last = 1;
4782 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
4783 return r;
4784 }
4785 }
4786 }
4787 return 0;
4788 }
4789
4790 static int tgsi_udiv(struct r600_shader_ctx *ctx)
4791 {
4792 return tgsi_divmod(ctx, 0, 0);
4793 }
4794
4795 static int tgsi_umod(struct r600_shader_ctx *ctx)
4796 {
4797 return tgsi_divmod(ctx, 1, 0);
4798 }
4799
4800 static int tgsi_idiv(struct r600_shader_ctx *ctx)
4801 {
4802 return tgsi_divmod(ctx, 0, 1);
4803 }
4804
4805 static int tgsi_imod(struct r600_shader_ctx *ctx)
4806 {
4807 return tgsi_divmod(ctx, 1, 1);
4808 }
4809
4810
4811 static int tgsi_f2i(struct r600_shader_ctx *ctx)
4812 {
4813 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4814 struct r600_bytecode_alu alu;
4815 int i, r;
4816 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4817 int last_inst = tgsi_last_instruction(write_mask);
4818
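/* Two passes: TRUNC first rounds toward zero in float, then the
 * FLT_TO_INT/FLT_TO_UINT op from inst_info converts, e.g.
 * -3.7 -> TRUNC -> -3.0 -> FLT_TO_INT -> -3. The alu.last special
 * case in the second loop forces FLT_TO_UINT to close its
 * instruction group, which matters on chips where it is a
 * transcendental-slot-only op.
 */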
4819 for (i = 0; i < 4; i++) {
4820 if (!(write_mask & (1<<i)))
4821 continue;
4822
4823 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4824 alu.op = ALU_OP1_TRUNC;
4825
4826 alu.dst.sel = ctx->temp_reg;
4827 alu.dst.chan = i;
4828 alu.dst.write = 1;
4829
4830 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4831 if (i == last_inst)
4832 alu.last = 1;
4833 r = r600_bytecode_add_alu(ctx->bc, &alu);
4834 if (r)
4835 return r;
4836 }
4837
4838 for (i = 0; i < 4; i++) {
4839 if (!(write_mask & (1<<i)))
4840 continue;
4841
4842 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4843 alu.op = ctx->inst_info->op;
4844
4845 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4846
4847 alu.src[0].sel = ctx->temp_reg;
4848 alu.src[0].chan = i;
4849
4850 if (i == last_inst || alu.op == ALU_OP1_FLT_TO_UINT)
4851 alu.last = 1;
4852 r = r600_bytecode_add_alu(ctx->bc, &alu);
4853 if (r)
4854 return r;
4855 }
4856
4857 return 0;
4858 }
4859
4860 static int tgsi_iabs(struct r600_shader_ctx *ctx)
4861 {
4862 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4863 struct r600_bytecode_alu alu;
4864 int i, r;
4865 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4866 int last_inst = tgsi_last_instruction(write_mask);
4867
4868 /* tmp = -src */
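/* CNDGE_INT(a, b, c) selects b when a >= 0 and c otherwise, so the
 * two steps below compute |src|: e.g. src = -5 -> tmp = 0 - (-5) = 5,
 * then dst = (src >= 0 ? src : tmp) = 5.
 */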
4869 for (i = 0; i < 4; i++) {
4870 if (!(write_mask & (1<<i)))
4871 continue;
4872
4873 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4874 alu.op = ALU_OP2_SUB_INT;
4875
4876 alu.dst.sel = ctx->temp_reg;
4877 alu.dst.chan = i;
4878 alu.dst.write = 1;
4879
4880 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4881 alu.src[0].sel = V_SQ_ALU_SRC_0;
4882
4883 if (i == last_inst)
4884 alu.last = 1;
4885 r = r600_bytecode_add_alu(ctx->bc, &alu);
4886 if (r)
4887 return r;
4888 }
4889
4890 /* dst = (src >= 0 ? src : tmp) */
4891 for (i = 0; i < 4; i++) {
4892 if (!(write_mask & (1<<i)))
4893 continue;
4894
4895 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4896 alu.op = ALU_OP3_CNDGE_INT;
4897 alu.is_op3 = 1;
4898 alu.dst.write = 1;
4899
4900 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4901
4902 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4903 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4904 alu.src[2].sel = ctx->temp_reg;
4905 alu.src[2].chan = i;
4906
4907 if (i == last_inst)
4908 alu.last = 1;
4909 r = r600_bytecode_add_alu(ctx->bc, &alu);
4910 if (r)
4911 return r;
4912 }
4913 return 0;
4914 }
4915
4916 static int tgsi_issg(struct r600_shader_ctx *ctx)
4917 {
4918 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4919 struct r600_bytecode_alu alu;
4920 int i, r;
4921 unsigned write_mask = inst->Dst[0].Register.WriteMask;
4922 int last_inst = tgsi_last_instruction(write_mask);
4923
4924 /* tmp = (src >= 0 ? src : -1) */
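/* Integer sign in two selects, e.g. src = 7 -> tmp = 7 -> dst = 1;
 * src = -4 -> tmp = -1 -> dst = -1; src = 0 -> tmp = 0 -> dst = 0.
 */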
4925 for (i = 0; i < 4; i++) {
4926 if (!(write_mask & (1<<i)))
4927 continue;
4928
4929 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4930 alu.op = ALU_OP3_CNDGE_INT;
4931 alu.is_op3 = 1;
4932
4933 alu.dst.sel = ctx->temp_reg;
4934 alu.dst.chan = i;
4935 alu.dst.write = 1;
4936
4937 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4938 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
4939 alu.src[2].sel = V_SQ_ALU_SRC_M_1_INT;
4940
4941 if (i == last_inst)
4942 alu.last = 1;
4943 r = r600_bytecode_add_alu(ctx->bc, &alu);
4944 if (r)
4945 return r;
4946 }
4947
4948 /* dst = (tmp > 0 ? 1 : tmp) */
4949 for (i = 0; i < 4; i++) {
4950 if (!(write_mask & (1<<i)))
4951 continue;
4952
4953 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4954 alu.op = ALU_OP3_CNDGT_INT;
4955 alu.is_op3 = 1;
4956 alu.dst.write = 1;
4957
4958 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
4959
4960 alu.src[0].sel = ctx->temp_reg;
4961 alu.src[0].chan = i;
4962
4963 alu.src[1].sel = V_SQ_ALU_SRC_1_INT;
4964
4965 alu.src[2].sel = ctx->temp_reg;
4966 alu.src[2].chan = i;
4967
4968 if (i == last_inst)
4969 alu.last = 1;
4970 r = r600_bytecode_add_alu(ctx->bc, &alu);
4971 if (r)
4972 return r;
4973 }
4974 return 0;
4975 }
4976
4977
4978
4979 static int tgsi_ssg(struct r600_shader_ctx *ctx)
4980 {
4981 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
4982 struct r600_bytecode_alu alu;
4983 int i, r;
4984
4985 /* tmp = (src > 0 ? 1 : src) */
4986 for (i = 0; i < 4; i++) {
4987 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
4988 alu.op = ALU_OP3_CNDGT;
4989 alu.is_op3 = 1;
4990
4991 alu.dst.sel = ctx->temp_reg;
4992 alu.dst.chan = i;
4993
4994 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
4995 alu.src[1].sel = V_SQ_ALU_SRC_1;
4996 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
4997
4998 if (i == 3)
4999 alu.last = 1;
5000 r = r600_bytecode_add_alu(ctx->bc, &alu);
5001 if (r)
5002 return r;
5003 }
5004
5005 /* dst = (-tmp > 0 ? -1 : tmp) */
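/* Float sign via two CNDGT selects, e.g. src = -3.5: step one keeps
 * -3.5 (not > 0), step two sees -tmp = 3.5 > 0 and picks -1.0. A
 * positive source is clamped to 1.0 in step one and survives step
 * two unchanged; zero passes through both.
 */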
5006 for (i = 0; i < 4; i++) {
5007 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5008 alu.op = ALU_OP3_CNDGT;
5009 alu.is_op3 = 1;
5010 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5011
5012 alu.src[0].sel = ctx->temp_reg;
5013 alu.src[0].chan = i;
5014 alu.src[0].neg = 1;
5015
5016 alu.src[1].sel = V_SQ_ALU_SRC_1;
5017 alu.src[1].neg = 1;
5018
5019 alu.src[2].sel = ctx->temp_reg;
5020 alu.src[2].chan = i;
5021
5022 if (i == 3)
5023 alu.last = 1;
5024 r = r600_bytecode_add_alu(ctx->bc, &alu);
5025 if (r)
5026 return r;
5027 }
5028 return 0;
5029 }
5030
5031 static int tgsi_bfi(struct r600_shader_ctx *ctx)
5032 {
5033 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5034 struct r600_bytecode_alu alu;
5035 int i, r, t1, t2;
5036
5037 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5038 int last_inst = tgsi_last_instruction(write_mask);
5039
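/* Bitfield insert in three steps, e.g. base = 0x12345678,
 * insert = 0xAB, offset = 4, width = 8:
 * mask = BFM(8, 4) = 0x00000FF0
 * shifted insert = 0xAB << 4 = 0x00000AB0
 * BFI(mask, ins, base) = (mask & ins) | (~mask & base) = 0x12345AB8
 */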
5040 t1 = ctx->temp_reg;
5041
5042 for (i = 0; i < 4; i++) {
5043 if (!(write_mask & (1<<i)))
5044 continue;
5045
5046 /* create mask tmp */
5047 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5048 alu.op = ALU_OP2_BFM_INT;
5049 alu.dst.sel = t1;
5050 alu.dst.chan = i;
5051 alu.dst.write = 1;
5052 alu.last = i == last_inst;
5053
5054 r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
5055 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5056
5057 r = r600_bytecode_add_alu(ctx->bc, &alu);
5058 if (r)
5059 return r;
5060 }
5061
5062 t2 = r600_get_temp(ctx);
5063
5064 for (i = 0; i < 4; i++) {
5065 if (!(write_mask & (1<<i)))
5066 continue;
5067
5068 /* shift insert left */
5069 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5070 alu.op = ALU_OP2_LSHL_INT;
5071 alu.dst.sel = t2;
5072 alu.dst.chan = i;
5073 alu.dst.write = 1;
5074 alu.last = i == last_inst;
5075
5076 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
5077 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
5078
5079 r = r600_bytecode_add_alu(ctx->bc, &alu);
5080 if (r)
5081 return r;
5082 }
5083
5084 for (i = 0; i < 4; i++) {
5085 if (!(write_mask & (1<<i)))
5086 continue;
5087
5088 /* actual bitfield insert */
5089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5090 alu.op = ALU_OP3_BFI_INT;
5091 alu.is_op3 = 1;
5092 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5093 alu.dst.chan = i;
5094 alu.dst.write = 1;
5095 alu.last = i == last_inst;
5096
5097 alu.src[0].sel = t1;
5098 alu.src[0].chan = i;
5099 alu.src[1].sel = t2;
5100 alu.src[1].chan = i;
5101 r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
5102
5103 r = r600_bytecode_add_alu(ctx->bc, &alu);
5104 if (r)
5105 return r;
5106 }
5107
5108 return 0;
5109 }
5110
5111 static int tgsi_msb(struct r600_shader_ctx *ctx)
5112 {
5113 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5114 struct r600_bytecode_alu alu;
5115 int i, r, t1, t2;
5116
5117 unsigned write_mask = inst->Dst[0].Register.WriteMask;
5118 int last_inst = tgsi_last_instruction(write_mask);
5119
5120 assert(ctx->inst_info->op == ALU_OP1_FFBH_INT ||
5121 ctx->inst_info->op == ALU_OP1_FFBH_UINT);
5122
5123 t1 = ctx->temp_reg;
5124
5125 /* bit position is indexed from lsb by TGSI, and from msb by the hardware */
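/* e.g. UMSB(0x00008000): FFBH_UINT counts 16 leading zeros from the
 * MSB, and 31 - 16 = 15 is the LSB-based index TGSI wants; when no
 * bit is set FFBH returns -1, which the final CNDGE_INT passes
 * through unchanged.
 */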
5126 for (i = 0; i < 4; i++) {
5127 if (!(write_mask & (1<<i)))
5128 continue;
5129
5130 /* t1 = FFBH_INT / FFBH_UINT */
5131 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5132 alu.op = ctx->inst_info->op;
5133 alu.dst.sel = t1;
5134 alu.dst.chan = i;
5135 alu.dst.write = 1;
5136 alu.last = i == last_inst;
5137
5138 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5139
5140 r = r600_bytecode_add_alu(ctx->bc, &alu);
5141 if (r)
5142 return r;
5143 }
5144
5145 t2 = r600_get_temp(ctx);
5146
5147 for (i = 0; i < 4; i++) {
5148 if (!(write_mask & (1<<i)))
5149 continue;
5150
5151 /* t2 = 31 - t1 */
5152 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5153 alu.op = ALU_OP2_SUB_INT;
5154 alu.dst.sel = t2;
5155 alu.dst.chan = i;
5156 alu.dst.write = 1;
5157 alu.last = i == last_inst;
5158
5159 alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
5160 alu.src[0].value = 31;
5161 alu.src[1].sel = t1;
5162 alu.src[1].chan = i;
5163
5164 r = r600_bytecode_add_alu(ctx->bc, &alu);
5165 if (r)
5166 return r;
5167 }
5168
5169 for (i = 0; i < 4; i++) {
5170 if (!(write_mask & (1<<i)))
5171 continue;
5172
5173 /* result = t1 >= 0 ? t2 : t1 */
5174 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5175 alu.op = ALU_OP3_CNDGE_INT;
5176 alu.is_op3 = 1;
5177 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5178 alu.dst.chan = i;
5179 alu.dst.write = 1;
5180 alu.last = i == last_inst;
5181
5182 alu.src[0].sel = t1;
5183 alu.src[0].chan = i;
5184 alu.src[1].sel = t2;
5185 alu.src[1].chan = i;
5186 alu.src[2].sel = t1;
5187 alu.src[2].chan = i;
5188
5189 r = r600_bytecode_add_alu(ctx->bc, &alu);
5190 if (r)
5191 return r;
5192 }
5193
5194 return 0;
5195 }
5196
5197 static int tgsi_interp_egcm(struct r600_shader_ctx *ctx)
5198 {
5199 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5200 struct r600_bytecode_alu alu;
5201 int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti;
5202 unsigned location;
5203 int input;
5204
5205 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
5206
5207 input = inst->Src[0].Register.Index;
5208
5209 /* Interpolators have been marked for use already by allocate_system_value_inputs */
5210 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5211 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5212 location = TGSI_INTERPOLATE_LOC_CENTER; /* sample offset will be added explicitly */
5213 }
5214 else {
5215 location = TGSI_INTERPOLATE_LOC_CENTROID;
5216 }
5217
5218 k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
5219 if (k < 0)
5220 k = 0;
5221 interp_gpr = ctx->eg_interpolators[k].ij_index / 2;
5222 interp_base_chan = 2 * (ctx->eg_interpolators[k].ij_index % 2);
5223
5224 /* NOTE: currently offset is not perspective correct */
5225 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5226 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5227 int sample_gpr = -1;
5228 int gradientsH, gradientsV;
5229 struct r600_bytecode_tex tex;
5230
5231 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5232 sample_gpr = load_sample_position(ctx, &ctx->src[1], ctx->src[1].swizzle[0]);
5233 }
5234
5235 gradientsH = r600_get_temp(ctx);
5236 gradientsV = r600_get_temp(ctx);
5237 for (i = 0; i < 2; i++) {
5238 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
5239 tex.op = i == 0 ? FETCH_OP_GET_GRADIENTS_H : FETCH_OP_GET_GRADIENTS_V;
5240 tex.src_gpr = interp_gpr;
5241 tex.src_sel_x = interp_base_chan + 0;
5242 tex.src_sel_y = interp_base_chan + 1;
5243 tex.src_sel_z = 0;
5244 tex.src_sel_w = 0;
5245 tex.dst_gpr = i == 0 ? gradientsH : gradientsV;
5246 tex.dst_sel_x = 0;
5247 tex.dst_sel_y = 1;
5248 tex.dst_sel_z = 7;
5249 tex.dst_sel_w = 7;
5250 tex.inst_mod = 1; /* use per-pixel gradient calculation */
5251 tex.sampler_id = 0;
5252 tex.resource_id = tex.sampler_id;
5253 r = r600_bytecode_add_tex(ctx->bc, &tex);
5254 if (r)
5255 return r;
5256 }
5257
5258 for (i = 0; i < 2; i++) {
5259 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5260 alu.op = ALU_OP3_MULADD;
5261 alu.is_op3 = 1;
5262 alu.src[0].sel = gradientsH;
5263 alu.src[0].chan = i;
5264 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5265 alu.src[1].sel = sample_gpr;
5266 alu.src[1].chan = 2;
5267 }
5268 else {
5269 r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
5270 }
5271 alu.src[2].sel = interp_gpr;
5272 alu.src[2].chan = interp_base_chan + i;
5273 alu.dst.sel = ctx->temp_reg;
5274 alu.dst.chan = i;
5275 alu.last = i == 1;
5276
5277 r = r600_bytecode_add_alu(ctx->bc, &alu);
5278 if (r)
5279 return r;
5280 }
5281
5282 for (i = 0; i < 2; i++) {
5283 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5284 alu.op = ALU_OP3_MULADD;
5285 alu.is_op3 = 1;
5286 alu.src[0].sel = gradientsV;
5287 alu.src[0].chan = i;
5288 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5289 alu.src[1].sel = sample_gpr;
5290 alu.src[1].chan = 3;
5291 }
5292 else {
5293 r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
5294 }
5295 alu.src[2].sel = ctx->temp_reg;
5296 alu.src[2].chan = i;
5297 alu.dst.sel = ctx->temp_reg;
5298 alu.dst.chan = i;
5299 alu.last = i == 1;
5300
5301 r = r600_bytecode_add_alu(ctx->bc, &alu);
5302 if (r)
5303 return r;
5304 }
5305 }
5306
5307 tmp = r600_get_temp(ctx);
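/* The eight slots below form two ALU groups: the first issues
 * INTERP_ZW (writing channels 2-3), the second INTERP_XY (writing
 * channels 0-1); the i > 1 && i < 6 write condition keeps exactly
 * those four channels. src[0].chan = 1 - (i % 2) feeds the
 * barycentric i/j pair in the alternating order the interpolation
 * hardware expects.
 */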
5308 for (i = 0; i < 8; i++) {
5309 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5310 alu.op = i < 4 ? ALU_OP2_INTERP_ZW : ALU_OP2_INTERP_XY;
5311
5312 alu.dst.sel = tmp;
5313 if (i > 1 && i < 6) {
5314 alu.dst.write = 1;
5315 }
5316 else {
5317 alu.dst.write = 0;
5318 }
5319 alu.dst.chan = i % 4;
5320
5321 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5322 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5323 alu.src[0].sel = ctx->temp_reg;
5324 alu.src[0].chan = 1 - (i % 2);
5325 } else {
5326 alu.src[0].sel = interp_gpr;
5327 alu.src[0].chan = interp_base_chan + 1 - (i % 2);
5328 }
5329 alu.src[1].sel = V_SQ_ALU_SRC_PARAM_BASE + ctx->shader->input[input].lds_pos;
5330 alu.src[1].chan = 0;
5331
5332 alu.last = i % 4 == 3;
5333 alu.bank_swizzle_force = SQ_ALU_VEC_210;
5334
5335 r = r600_bytecode_add_alu(ctx->bc, &alu);
5336 if (r)
5337 return r;
5338 }
5339
5340 /* INTERP can't swizzle dst */
5341 lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5342 for (i = 0; i <= lasti; i++) {
5343 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5344 continue;
5345
5346 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5347 alu.op = ALU_OP1_MOV;
5348 alu.src[0].sel = tmp;
5349 alu.src[0].chan = ctx->src[0].swizzle[i];
5350 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5351 alu.dst.write = 1;
5352 alu.last = i == lasti;
5353 r = r600_bytecode_add_alu(ctx->bc, &alu);
5354 if (r)
5355 return r;
5356 }
5357
5358 return 0;
5359 }
5360
5361
5362 static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instruction *inst)
5363 {
5364 struct r600_bytecode_alu alu;
5365 int i, r;
5366
5367 for (i = 0; i < 4; i++) {
5368 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5369 if (!(inst->Dst[0].Register.WriteMask & (1 << i))) {
5370 alu.op = ALU_OP0_NOP;
5371 alu.dst.chan = i;
5372 } else {
5373 alu.op = ALU_OP1_MOV;
5374 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5375 alu.src[0].sel = ctx->temp_reg;
5376 alu.src[0].chan = i;
5377 }
5378 if (i == 3) {
5379 alu.last = 1;
5380 }
5381 r = r600_bytecode_add_alu(ctx->bc, &alu);
5382 if (r)
5383 return r;
5384 }
5385 return 0;
5386 }
5387
5388 static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
5389 unsigned temp, int chan,
5390 struct r600_bytecode_alu_src *bc_src,
5391 const struct r600_shader_src *shader_src)
5392 {
5393 struct r600_bytecode_alu alu;
5394 int r;
5395
5396 r600_bytecode_src(bc_src, shader_src, chan);
5397
5398 /* op3 operands don't support abs modifier */
5399 if (bc_src->abs) {
5400 assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
5401 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5402 alu.op = ALU_OP1_MOV;
5403 alu.dst.sel = temp;
5404 alu.dst.chan = chan;
5405 alu.dst.write = 1;
5406
5407 alu.src[0] = *bc_src;
5408 alu.last = true; /* sufficient? */
5409 r = r600_bytecode_add_alu(ctx->bc, &alu);
5410 if (r)
5411 return r;
5412
5413 memset(bc_src, 0, sizeof(*bc_src));
5414 bc_src->sel = temp;
5415 bc_src->chan = chan;
5416 }
5417 return 0;
5418 }
5419
5420 static int tgsi_op3(struct r600_shader_ctx *ctx)
5421 {
5422 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5423 struct r600_bytecode_alu alu;
5424 int i, j, r;
5425 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5426 int temp_regs[4];
5427
5428 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5429 temp_regs[j] = 0;
5430 if (ctx->src[j].abs)
5431 temp_regs[j] = r600_get_temp(ctx);
5432 }
5433 for (i = 0; i < lasti + 1; i++) {
5434 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5435 continue;
5436
5437 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5438 alu.op = ctx->inst_info->op;
5439 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5440 r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
5441 if (r)
5442 return r;
5443 }
5444
5445 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5446 alu.dst.chan = i;
5447 alu.dst.write = 1;
5448 alu.is_op3 = 1;
5449 if (i == lasti) {
5450 alu.last = 1;
5451 }
5452 r = r600_bytecode_add_alu(ctx->bc, &alu);
5453 if (r)
5454 return r;
5455 }
5456 return 0;
5457 }
5458
5459 static int tgsi_dp(struct r600_shader_ctx *ctx)
5460 {
5461 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5462 struct r600_bytecode_alu alu;
5463 int i, j, r;
5464
5465 for (i = 0; i < 4; i++) {
5466 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5467 alu.op = ctx->inst_info->op;
5468 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
5469 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
5470 }
5471
5472 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
5473 alu.dst.chan = i;
5474 alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
5475 /* handle some special cases */
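/* All dot products are issued as four-slot ops; the unused slots are
 * neutralized instead, e.g. DP3 replaces both w-slot operands with 0
 * so the sum gains a harmless 0*0 term, and DPH replaces src0.w with
 * 1 so the w term becomes 1*src1.w.
 */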
5476 switch (inst->Instruction.Opcode) {
5477 case TGSI_OPCODE_DP2:
5478 if (i > 1) {
5479 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
5480 alu.src[0].chan = alu.src[1].chan = 0;
5481 }
5482 break;
5483 case TGSI_OPCODE_DP3:
5484 if (i > 2) {
5485 alu.src[0].sel = alu.src[1].sel = V_SQ_ALU_SRC_0;
5486 alu.src[0].chan = alu.src[1].chan = 0;
5487 }
5488 break;
5489 case TGSI_OPCODE_DPH:
5490 if (i == 3) {
5491 alu.src[0].sel = V_SQ_ALU_SRC_1;
5492 alu.src[0].chan = 0;
5493 alu.src[0].neg = 0;
5494 }
5495 break;
5496 default:
5497 break;
5498 }
5499 if (i == 3) {
5500 alu.last = 1;
5501 }
5502 r = r600_bytecode_add_alu(ctx->bc, &alu);
5503 if (r)
5504 return r;
5505 }
5506 return 0;
5507 }
5508
5509 static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx,
5510 unsigned index)
5511 {
5512 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5513 return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY &&
5514 inst->Src[index].Register.File != TGSI_FILE_INPUT &&
5515 inst->Src[index].Register.File != TGSI_FILE_OUTPUT) ||
5516 ctx->src[index].neg || ctx->src[index].abs ||
5517 (inst->Src[index].Register.File == TGSI_FILE_INPUT && ctx->type == TGSI_PROCESSOR_GEOMETRY);
5518 }
5519
5520 static inline unsigned tgsi_tex_get_src_gpr(struct r600_shader_ctx *ctx,
5521 unsigned index)
5522 {
5523 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5524 return ctx->file_offset[inst->Src[index].Register.File] + inst->Src[index].Register.Index;
5525 }
5526
5527 static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_loading)
5528 {
5529 struct r600_bytecode_vtx vtx;
5530 struct r600_bytecode_alu alu;
5531 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5532 int src_gpr, r, i;
5533 int id = tgsi_tex_get_src_gpr(ctx, 1);
5534
5535 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5536 if (src_requires_loading) {
5537 for (i = 0; i < 4; i++) {
5538 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5539 alu.op = ALU_OP1_MOV;
5540 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
5541 alu.dst.sel = ctx->temp_reg;
5542 alu.dst.chan = i;
5543 if (i == 3)
5544 alu.last = 1;
5545 alu.dst.write = 1;
5546 r = r600_bytecode_add_alu(ctx->bc, &alu);
5547 if (r)
5548 return r;
5549 }
5550 src_gpr = ctx->temp_reg;
5551 }
5552
5553 memset(&vtx, 0, sizeof(vtx));
5554 vtx.op = FETCH_OP_VFETCH;
5555 vtx.buffer_id = id + R600_MAX_CONST_BUFFERS;
5556 vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
5557 vtx.src_gpr = src_gpr;
5558 vtx.mega_fetch_count = 16;
5559 vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
5560 vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
5561 vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
5562 vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
5563 vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
5564 vtx.use_const_fields = 1;
5565
5566 if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
5567 return r;
5568
5569 if (ctx->bc->chip_class >= EVERGREEN)
5570 return 0;
5571
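/* Pre-Evergreen fetch hardware cannot apply the buffer format's
 * component defaults itself, so patch the fetched value: the AND
 * below clears channels the format does not provide, and the final
 * OR forces the alpha default, both using the R600_BUFFER_INFO
 * constants uploaded per buffer.
 */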
5572 for (i = 0; i < 4; i++) {
5573 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
5574 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
5575 continue;
5576
5577 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5578 alu.op = ALU_OP2_AND_INT;
5579
5580 alu.dst.chan = i;
5581 alu.dst.sel = vtx.dst_gpr;
5582 alu.dst.write = 1;
5583
5584 alu.src[0].sel = vtx.dst_gpr;
5585 alu.src[0].chan = i;
5586
5587 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL;
5588 alu.src[1].sel += (id * 2);
5589 alu.src[1].chan = i % 4;
5590 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5591
5592 if (i == lasti)
5593 alu.last = 1;
5594 r = r600_bytecode_add_alu(ctx->bc, &alu);
5595 if (r)
5596 return r;
5597 }
5598
5599 if (inst->Dst[0].Register.WriteMask & 3) {
5600 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5601 alu.op = ALU_OP2_OR_INT;
5602
5603 alu.dst.chan = 3;
5604 alu.dst.sel = vtx.dst_gpr;
5605 alu.dst.write = 1;
5606
5607 alu.src[0].sel = vtx.dst_gpr;
5608 alu.src[0].chan = 3;
5609
5610 alu.src[1].sel = R600_SHADER_BUFFER_INFO_SEL + (id * 2) + 1;
5611 alu.src[1].chan = 0;
5612 alu.src[1].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5613
5614 alu.last = 1;
5615 r = r600_bytecode_add_alu(ctx->bc, &alu);
5616 if (r)
5617 return r;
5618 }
5619 return 0;
5620 }
5621
5622 static int r600_do_buffer_txq(struct r600_shader_ctx *ctx)
5623 {
5624 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5625 struct r600_bytecode_alu alu;
5626 int r;
5627 int id = tgsi_tex_get_src_gpr(ctx, 1);
5628
5629 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5630 alu.op = ALU_OP1_MOV;
5631 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
5632 if (ctx->bc->chip_class >= EVERGREEN) {
5633 /* channel 0 or 2 of each word */
5634 alu.src[0].sel += (id / 2);
5635 alu.src[0].chan = (id % 2) * 2;
5636 } else {
5637 /* on r600 it lives in channel 1 of the second dword */
5638 alu.src[0].sel += (id * 2) + 1;
5639 alu.src[0].chan = 1;
5640 }
5641 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
5642 tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
5643 alu.last = 1;
5644 r = r600_bytecode_add_alu(ctx->bc, &alu);
5645 if (r)
5646 return r;
5647 return 0;
5648 }
5649
5650 static int tgsi_tex(struct r600_shader_ctx *ctx)
5651 {
5652 static const float one_point_five = 1.5f;
5653 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
5654 struct r600_bytecode_tex tex;
5655 struct r600_bytecode_alu alu;
5656 unsigned src_gpr;
5657 int r, i, j;
5658 int opcode;
5659 bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
5660 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5661 (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
5662 inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
5663
5664 bool txf_add_offsets = inst->Texture.NumOffsets &&
5665 inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
5666 inst->Texture.Texture != TGSI_TEXTURE_BUFFER;
5667
5668 /* Texture fetch instructions can only use GPRs as source.
5669 * They also cannot negate the source or take its absolute value */
5670 const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
5671 inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
5672 tgsi_tex_src_requires_loading(ctx, 0)) ||
5673 read_compressed_msaa || txf_add_offsets;
5674
5675 boolean src_loaded = FALSE;
5676 unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
5677 int8_t offset_x = 0, offset_y = 0, offset_z = 0;
5678 boolean has_txq_cube_array_z = false;
5679 unsigned sampler_index_mode;
5680
5681 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
5682 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5683 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)))
5684 if (inst->Dst[0].Register.WriteMask & 4) {
5685 ctx->shader->has_txq_cube_array_z_comp = true;
5686 has_txq_cube_array_z = true;
5687 }
5688
5689 if (inst->Instruction.Opcode == TGSI_OPCODE_TEX2 ||
5690 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5691 inst->Instruction.Opcode == TGSI_OPCODE_TXL2 ||
5692 inst->Instruction.Opcode == TGSI_OPCODE_TG4)
5693 sampler_src_reg = 2;
5694
5695 /* TGSI moves the sampler to src reg 3 for TXD */
5696 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD)
5697 sampler_src_reg = 3;
5698
5699 sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; /* CF_INDEX_1 : CF_INDEX_NONE */
5700
5701 src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
5702
5703 if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
5704 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
5705 ctx->shader->uses_tex_buffers = true;
5706 return r600_do_buffer_txq(ctx);
5707 }
5708 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
5709 if (ctx->bc->chip_class < EVERGREEN)
5710 ctx->shader->uses_tex_buffers = true;
5711 return do_vtx_fetch_inst(ctx, src_requires_loading);
5712 }
5713 }
5714
5715 if (inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
5716 int out_chan;
5717 /* Add perspective divide */
5718 if (ctx->bc->chip_class == CAYMAN) {
5719 out_chan = 2;
5720 for (i = 0; i < 3; i++) {
5721 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5722 alu.op = ALU_OP1_RECIP_IEEE;
5723 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5724
5725 alu.dst.sel = ctx->temp_reg;
5726 alu.dst.chan = i;
5727 if (i == 2)
5728 alu.last = 1;
5729 if (out_chan == i)
5730 alu.dst.write = 1;
5731 r = r600_bytecode_add_alu(ctx->bc, &alu);
5732 if (r)
5733 return r;
5734 }
5735
5736 } else {
5737 out_chan = 3;
5738 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5739 alu.op = ALU_OP1_RECIP_IEEE;
5740 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5741
5742 alu.dst.sel = ctx->temp_reg;
5743 alu.dst.chan = out_chan;
5744 alu.last = 1;
5745 alu.dst.write = 1;
5746 r = r600_bytecode_add_alu(ctx->bc, &alu);
5747 if (r)
5748 return r;
5749 }
5750
5751 for (i = 0; i < 3; i++) {
5752 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5753 alu.op = ALU_OP2_MUL;
5754 alu.src[0].sel = ctx->temp_reg;
5755 alu.src[0].chan = out_chan;
5756 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
5757 alu.dst.sel = ctx->temp_reg;
5758 alu.dst.chan = i;
5759 alu.dst.write = 1;
5760 r = r600_bytecode_add_alu(ctx->bc, &alu);
5761 if (r)
5762 return r;
5763 }
5764 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5765 alu.op = ALU_OP1_MOV;
5766 alu.src[0].sel = V_SQ_ALU_SRC_1;
5767 alu.src[0].chan = 0;
5768 alu.dst.sel = ctx->temp_reg;
5769 alu.dst.chan = 3;
5770 alu.last = 1;
5771 alu.dst.write = 1;
5772 r = r600_bytecode_add_alu(ctx->bc, &alu);
5773 if (r)
5774 return r;
5775 src_loaded = TRUE;
5776 src_gpr = ctx->temp_reg;
5777 }
5778
5779
5780 if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
5781 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5782 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5783 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
5784 inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
5785 inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
5786
5787 static const unsigned src0_swizzle[] = {2, 2, 0, 1};
5788 static const unsigned src1_swizzle[] = {1, 0, 2, 2};
5789
5790 /* tmp1.xyzw = CUBE(R0.zzxy, R0.yxzz) */
5791 for (i = 0; i < 4; i++) {
5792 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5793 alu.op = ALU_OP2_CUBE;
5794 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
5795 r600_bytecode_src(&alu.src[1], &ctx->src[0], src1_swizzle[i]);
5796 alu.dst.sel = ctx->temp_reg;
5797 alu.dst.chan = i;
5798 if (i == 3)
5799 alu.last = 1;
5800 alu.dst.write = 1;
5801 r = r600_bytecode_add_alu(ctx->bc, &alu);
5802 if (r)
5803 return r;
5804 }
5805
5806 /* tmp1.z = RCP_e(|tmp1.z|) */
5807 if (ctx->bc->chip_class == CAYMAN) {
5808 for (i = 0; i < 3; i++) {
5809 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5810 alu.op = ALU_OP1_RECIP_IEEE;
5811 alu.src[0].sel = ctx->temp_reg;
5812 alu.src[0].chan = 2;
5813 alu.src[0].abs = 1;
5814 alu.dst.sel = ctx->temp_reg;
5815 alu.dst.chan = i;
5816 if (i == 2)
5817 alu.dst.write = 1;
5818 if (i == 2)
5819 alu.last = 1;
5820 r = r600_bytecode_add_alu(ctx->bc, &alu);
5821 if (r)
5822 return r;
5823 }
5824 } else {
5825 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5826 alu.op = ALU_OP1_RECIP_IEEE;
5827 alu.src[0].sel = ctx->temp_reg;
5828 alu.src[0].chan = 2;
5829 alu.src[0].abs = 1;
5830 alu.dst.sel = ctx->temp_reg;
5831 alu.dst.chan = 2;
5832 alu.dst.write = 1;
5833 alu.last = 1;
5834 r = r600_bytecode_add_alu(ctx->bc, &alu);
5835 if (r)
5836 return r;
5837 }
5838
5839 		/* MULADD R0.x,  R0.x,  PS1,  (0x3FC00000, 1.5f).x
5840 		 * MULADD R0.y,  R0.y,  PS1,  (0x3FC00000, 1.5f).x
5841 		 * CUBE leaves s/t scaled by 2*MA, so x/y * PS1 (= 1/|2*MA|) + 1.5 puts the face coords in the [1.0, 2.0] range the sampler expects;
5842 		 * muladd has no writemask, have to use another temp */
5843 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5844 alu.op = ALU_OP3_MULADD;
5845 alu.is_op3 = 1;
5846
5847 alu.src[0].sel = ctx->temp_reg;
5848 alu.src[0].chan = 0;
5849 alu.src[1].sel = ctx->temp_reg;
5850 alu.src[1].chan = 2;
5851
5852 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5853 alu.src[2].chan = 0;
5854 alu.src[2].value = *(uint32_t *)&one_point_five;
5855
5856 alu.dst.sel = ctx->temp_reg;
5857 alu.dst.chan = 0;
5858 alu.dst.write = 1;
5859
5860 r = r600_bytecode_add_alu(ctx->bc, &alu);
5861 if (r)
5862 return r;
5863
5864 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5865 alu.op = ALU_OP3_MULADD;
5866 alu.is_op3 = 1;
5867
5868 alu.src[0].sel = ctx->temp_reg;
5869 alu.src[0].chan = 1;
5870 alu.src[1].sel = ctx->temp_reg;
5871 alu.src[1].chan = 2;
5872
5873 alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
5874 alu.src[2].chan = 0;
5875 alu.src[2].value = *(uint32_t *)&one_point_five;
5876
5877 alu.dst.sel = ctx->temp_reg;
5878 alu.dst.chan = 1;
5879 alu.dst.write = 1;
5880
5881 alu.last = 1;
5882 r = r600_bytecode_add_alu(ctx->bc, &alu);
5883 if (r)
5884 return r;
5885 		/* write the initial compare value into the Z component:
5886 		 * - src0.w for shadow cube
5887 		 * - src1.x for shadow cube array */
5888 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
5889 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5890 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5891 alu.op = ALU_OP1_MOV;
5892 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
5893 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5894 else
5895 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5896 alu.dst.sel = ctx->temp_reg;
5897 alu.dst.chan = 2;
5898 alu.dst.write = 1;
5899 alu.last = 1;
5900 r = r600_bytecode_add_alu(ctx->bc, &alu);
5901 if (r)
5902 return r;
5903 }
5904
5905 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
5906 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
5907 if (ctx->bc->chip_class >= EVERGREEN) {
5908 int mytmp = r600_get_temp(ctx);
5909 static const float eight = 8.0f;
5910 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5911 alu.op = ALU_OP1_MOV;
5912 alu.src[0].sel = ctx->temp_reg;
5913 alu.src[0].chan = 3;
5914 alu.dst.sel = mytmp;
5915 alu.dst.chan = 0;
5916 alu.dst.write = 1;
5917 alu.last = 1;
5918 r = r600_bytecode_add_alu(ctx->bc, &alu);
5919 if (r)
5920 return r;
5921
5922 /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
5923 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5924 alu.op = ALU_OP3_MULADD;
5925 alu.is_op3 = 1;
5926 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5927 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
5928 alu.src[1].chan = 0;
5929 alu.src[1].value = *(uint32_t *)&eight;
5930 alu.src[2].sel = mytmp;
5931 alu.src[2].chan = 0;
5932 alu.dst.sel = ctx->temp_reg;
5933 alu.dst.chan = 3;
5934 alu.dst.write = 1;
5935 alu.last = 1;
5936 r = r600_bytecode_add_alu(ctx->bc, &alu);
5937 if (r)
5938 return r;
5939 } else if (ctx->bc->chip_class < EVERGREEN) {
5940 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
5941 tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
5942 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
5943 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
5944 tex.src_gpr = r600_get_temp(ctx);
5945 tex.src_sel_x = 0;
5946 tex.src_sel_y = 0;
5947 tex.src_sel_z = 0;
5948 tex.src_sel_w = 0;
5949 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
5950 tex.coord_type_x = 1;
5951 tex.coord_type_y = 1;
5952 tex.coord_type_z = 1;
5953 tex.coord_type_w = 1;
5954 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5955 alu.op = ALU_OP1_MOV;
5956 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5957 alu.dst.sel = tex.src_gpr;
5958 alu.dst.chan = 0;
5959 alu.last = 1;
5960 alu.dst.write = 1;
5961 r = r600_bytecode_add_alu(ctx->bc, &alu);
5962 if (r)
5963 return r;
5964
5965 r = r600_bytecode_add_tex(ctx->bc, &tex);
5966 if (r)
5967 return r;
5968 }
5969
5970 }
5971
5972 	/* for the cube forms of lod and bias, route the lod/bias value into the Z channel of the coord temp */
5973 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5974 inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5975 inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5976 inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
5977 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
5978 alu.op = ALU_OP1_MOV;
5979 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2 ||
5980 inst->Instruction.Opcode == TGSI_OPCODE_TXL2)
5981 r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
5982 else
5983 r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
5984 alu.dst.sel = ctx->temp_reg;
5985 alu.dst.chan = 2;
5986 alu.last = 1;
5987 alu.dst.write = 1;
5988 r = r600_bytecode_add_alu(ctx->bc, &alu);
5989 if (r)
5990 return r;
5991 }
5992
5993 src_loaded = TRUE;
5994 src_gpr = ctx->temp_reg;
5995 }
5996
5997 if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
5998 int temp_h = 0, temp_v = 0;
5999 int start_val = 0;
6000
6001 		/* if we've already loaded the src (i.e. for CUBE), don't reload it. */
6002 if (src_loaded == TRUE)
6003 start_val = 1;
6004 else
6005 src_loaded = TRUE;
6006 for (i = start_val; i < 3; i++) {
6007 int treg = r600_get_temp(ctx);
6008
6009 if (i == 0)
6010 src_gpr = treg;
6011 else if (i == 1)
6012 temp_h = treg;
6013 else
6014 temp_v = treg;
6015
6016 for (j = 0; j < 4; j++) {
6017 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6018 alu.op = ALU_OP1_MOV;
6019 r600_bytecode_src(&alu.src[0], &ctx->src[i], j);
6020 alu.dst.sel = treg;
6021 alu.dst.chan = j;
6022 if (j == 3)
6023 alu.last = 1;
6024 alu.dst.write = 1;
6025 r = r600_bytecode_add_alu(ctx->bc, &alu);
6026 if (r)
6027 return r;
6028 }
6029 }
6030 for (i = 1; i < 3; i++) {
6031 /* set gradients h/v */
6032 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6033 tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
6034 FETCH_OP_SET_GRADIENTS_V;
6035 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6036 tex.sampler_index_mode = sampler_index_mode;
6037 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6038 tex.resource_index_mode = sampler_index_mode;
6039
6040 tex.src_gpr = (i == 1) ? temp_h : temp_v;
6041 tex.src_sel_x = 0;
6042 tex.src_sel_y = 1;
6043 tex.src_sel_z = 2;
6044 tex.src_sel_w = 3;
6045
6046 tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
6047 tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
6048 if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
6049 tex.coord_type_x = 1;
6050 tex.coord_type_y = 1;
6051 tex.coord_type_z = 1;
6052 tex.coord_type_w = 1;
6053 }
6054 r = r600_bytecode_add_tex(ctx->bc, &tex);
6055 if (r)
6056 return r;
6057 }
6058 }
6059
6060 if (src_requires_loading && !src_loaded) {
6061 for (i = 0; i < 4; i++) {
6062 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6063 alu.op = ALU_OP1_MOV;
6064 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6065 alu.dst.sel = ctx->temp_reg;
6066 alu.dst.chan = i;
6067 if (i == 3)
6068 alu.last = 1;
6069 alu.dst.write = 1;
6070 r = r600_bytecode_add_alu(ctx->bc, &alu);
6071 if (r)
6072 return r;
6073 }
6074 src_loaded = TRUE;
6075 src_gpr = ctx->temp_reg;
6076 }
6077
6078 /* get offset values */
6079 if (inst->Texture.NumOffsets) {
6080 assert(inst->Texture.NumOffsets == 1);
6081
6082 /* The texture offset feature doesn't work with the TXF instruction
6083 * and must be emulated by adding the offset to the texture coordinates. */
6084 if (txf_add_offsets) {
6085 const struct tgsi_texture_offset *off = inst->TexOffsets;
6086
6087 switch (inst->Texture.Texture) {
6088 case TGSI_TEXTURE_3D:
6089 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6090 alu.op = ALU_OP2_ADD_INT;
6091 alu.src[0].sel = src_gpr;
6092 alu.src[0].chan = 2;
6093 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6094 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleZ];
6095 alu.dst.sel = src_gpr;
6096 alu.dst.chan = 2;
6097 alu.dst.write = 1;
6098 alu.last = 1;
6099 r = r600_bytecode_add_alu(ctx->bc, &alu);
6100 if (r)
6101 return r;
6102 /* fall through */
6103
6104 case TGSI_TEXTURE_2D:
6105 case TGSI_TEXTURE_SHADOW2D:
6106 case TGSI_TEXTURE_RECT:
6107 case TGSI_TEXTURE_SHADOWRECT:
6108 case TGSI_TEXTURE_2D_ARRAY:
6109 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6110 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6111 alu.op = ALU_OP2_ADD_INT;
6112 alu.src[0].sel = src_gpr;
6113 alu.src[0].chan = 1;
6114 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6115 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleY];
6116 alu.dst.sel = src_gpr;
6117 alu.dst.chan = 1;
6118 alu.dst.write = 1;
6119 alu.last = 1;
6120 r = r600_bytecode_add_alu(ctx->bc, &alu);
6121 if (r)
6122 return r;
6123 /* fall through */
6124
6125 case TGSI_TEXTURE_1D:
6126 case TGSI_TEXTURE_SHADOW1D:
6127 case TGSI_TEXTURE_1D_ARRAY:
6128 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6129 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6130 alu.op = ALU_OP2_ADD_INT;
6131 alu.src[0].sel = src_gpr;
6132 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6133 alu.src[1].value = ctx->literals[4 * off[0].Index + off[0].SwizzleX];
6134 alu.dst.sel = src_gpr;
6135 alu.dst.write = 1;
6136 alu.last = 1;
6137 r = r600_bytecode_add_alu(ctx->bc, &alu);
6138 if (r)
6139 return r;
6140 break;
6141 /* texture offsets do not apply to other texture targets */
6142 }
6143 } else {
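			/* TEX-path offsets go into the instruction's own offset
			 * fields, which appear to carry one fractional bit,
			 * hence the shift left by one below. */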
6144 switch (inst->Texture.Texture) {
6145 case TGSI_TEXTURE_3D:
6146 offset_z = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleZ] << 1;
6147 /* fallthrough */
6148 case TGSI_TEXTURE_2D:
6149 case TGSI_TEXTURE_SHADOW2D:
6150 case TGSI_TEXTURE_RECT:
6151 case TGSI_TEXTURE_SHADOWRECT:
6152 case TGSI_TEXTURE_2D_ARRAY:
6153 case TGSI_TEXTURE_SHADOW2D_ARRAY:
6154 offset_y = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleY] << 1;
6155 /* fallthrough */
6156 case TGSI_TEXTURE_1D:
6157 case TGSI_TEXTURE_SHADOW1D:
6158 case TGSI_TEXTURE_1D_ARRAY:
6159 case TGSI_TEXTURE_SHADOW1D_ARRAY:
6160 offset_x = ctx->literals[4 * inst->TexOffsets[0].Index + inst->TexOffsets[0].SwizzleX] << 1;
6161 }
6162 }
6163 }
6164
6165 /* Obtain the sample index for reading a compressed MSAA color texture.
6166 * To read the FMASK, we use the ldfptr instruction, which tells us
6167 * where the samples are stored.
6168 * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
6169 * which is the identity mapping. Each nibble says which physical sample
6170 * should be fetched to get that sample.
6171 *
6172 * Assume src.z contains the sample index. It should be modified like this:
6173 * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
6174 * Then fetch the texel with src.
6175 */
6176 if (read_compressed_msaa) {
6177 unsigned sample_chan = 3;
6178 unsigned temp = r600_get_temp(ctx);
6179 assert(src_loaded);
6180
6181 /* temp.w = ldfptr() */
6182 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6183 tex.op = FETCH_OP_LD;
6184 tex.inst_mod = 1; /* to indicate this is ldfptr */
6185 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6186 tex.sampler_index_mode = sampler_index_mode;
6187 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6188 tex.resource_index_mode = sampler_index_mode;
6189 tex.src_gpr = src_gpr;
6190 tex.dst_gpr = temp;
6191 tex.dst_sel_x = 7; /* mask out these components */
6192 tex.dst_sel_y = 7;
6193 tex.dst_sel_z = 7;
6194 tex.dst_sel_w = 0; /* store X */
6195 tex.src_sel_x = 0;
6196 tex.src_sel_y = 1;
6197 tex.src_sel_z = 2;
6198 tex.src_sel_w = 3;
6199 tex.offset_x = offset_x;
6200 tex.offset_y = offset_y;
6201 tex.offset_z = offset_z;
6202 r = r600_bytecode_add_tex(ctx->bc, &tex);
6203 if (r)
6204 return r;
6205
6206 /* temp.x = sample_index*4 */
6207 if (ctx->bc->chip_class == CAYMAN) {
6208 for (i = 0 ; i < 4; i++) {
6209 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6210 alu.op = ALU_OP2_MULLO_INT;
6211 alu.src[0].sel = src_gpr;
6212 alu.src[0].chan = sample_chan;
6213 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6214 alu.src[1].value = 4;
6215 alu.dst.sel = temp;
6216 alu.dst.chan = i;
6217 alu.dst.write = i == 0;
6218 if (i == 3)
6219 alu.last = 1;
6220 r = r600_bytecode_add_alu(ctx->bc, &alu);
6221 if (r)
6222 return r;
6223 }
6224 } else {
6225 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6226 alu.op = ALU_OP2_MULLO_INT;
6227 alu.src[0].sel = src_gpr;
6228 alu.src[0].chan = sample_chan;
6229 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6230 alu.src[1].value = 4;
6231 alu.dst.sel = temp;
6232 alu.dst.chan = 0;
6233 alu.dst.write = 1;
6234 alu.last = 1;
6235 r = r600_bytecode_add_alu(ctx->bc, &alu);
6236 if (r)
6237 return r;
6238 }
6239
6240 /* sample_index = temp.w >> temp.x */
6241 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6242 alu.op = ALU_OP2_LSHR_INT;
6243 alu.src[0].sel = temp;
6244 alu.src[0].chan = 3;
6245 alu.src[1].sel = temp;
6246 alu.src[1].chan = 0;
6247 alu.dst.sel = src_gpr;
6248 alu.dst.chan = sample_chan;
6249 alu.dst.write = 1;
6250 alu.last = 1;
6251 r = r600_bytecode_add_alu(ctx->bc, &alu);
6252 if (r)
6253 return r;
6254
6255 /* sample_index & 0xF */
6256 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6257 alu.op = ALU_OP2_AND_INT;
6258 alu.src[0].sel = src_gpr;
6259 alu.src[0].chan = sample_chan;
6260 alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
6261 alu.src[1].value = 0xF;
6262 alu.dst.sel = src_gpr;
6263 alu.dst.chan = sample_chan;
6264 alu.dst.write = 1;
6265 alu.last = 1;
6266 r = r600_bytecode_add_alu(ctx->bc, &alu);
6267 if (r)
6268 return r;
6269 #if 0
6270 /* visualize the FMASK */
6271 for (i = 0; i < 4; i++) {
6272 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6273 alu.op = ALU_OP1_INT_TO_FLT;
6274 alu.src[0].sel = src_gpr;
6275 alu.src[0].chan = sample_chan;
6276 alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6277 alu.dst.chan = i;
6278 alu.dst.write = 1;
6279 alu.last = 1;
6280 r = r600_bytecode_add_alu(ctx->bc, &alu);
6281 if (r)
6282 return r;
6283 }
6284 return 0;
6285 #endif
6286 }
6287
6288 	/* does this shader want the number of layers from TXQ for a cube array? */
6289 if (has_txq_cube_array_z) {
6290 int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6291
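		/* Read the layer count from the driver-supplied buffer-info
		 * constants.  On evergreen two samplers share one 128-bit
		 * constant (e.g. id 3: sel += 1, chan = 3); on r600 each sampler
		 * has two constants and the count sits in channel 2 of the
		 * second one. */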
6292 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6293 alu.op = ALU_OP1_MOV;
6294
6295 alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
6296 if (ctx->bc->chip_class >= EVERGREEN) {
6297 /* channel 1 or 3 of each word */
6298 alu.src[0].sel += (id / 2);
6299 alu.src[0].chan = ((id % 2) * 2) + 1;
6300 } else {
6301 			/* on r600 it is at channel 2 of the second of each sampler's two words */
6302 alu.src[0].sel += (id * 2) + 1;
6303 alu.src[0].chan = 2;
6304 }
6305 alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
6306 tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
6307 alu.last = 1;
6308 r = r600_bytecode_add_alu(ctx->bc, &alu);
6309 if (r)
6310 return r;
6311 /* disable writemask from texture instruction */
6312 inst->Dst[0].Register.WriteMask &= ~4;
6313 }
6314
6315 opcode = ctx->inst_info->op;
6316 if (opcode == FETCH_OP_GATHER4 &&
6317 inst->TexOffsets[0].File != TGSI_FILE_NULL &&
6318 inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
6319 opcode = FETCH_OP_GATHER4_O;
6320
6321 /* GATHER4_O/GATHER4_C_O use offset values loaded by
6322 SET_TEXTURE_OFFSETS instruction. The immediate offset values
6323 encoded in the instruction are ignored. */
6324 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6325 tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
6326 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6327 tex.sampler_index_mode = sampler_index_mode;
6328 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6329 tex.resource_index_mode = sampler_index_mode;
6330
6331 tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
6332 tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
6333 tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
6334 tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
6335 tex.src_sel_w = 4;
6336
6337 tex.dst_sel_x = 7;
6338 tex.dst_sel_y = 7;
6339 tex.dst_sel_z = 7;
6340 tex.dst_sel_w = 7;
6341
6342 r = r600_bytecode_add_tex(ctx->bc, &tex);
6343 if (r)
6344 return r;
6345 }
6346
6347 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6348 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6349 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6350 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6351 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
6352 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6353 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6354 switch (opcode) {
6355 case FETCH_OP_SAMPLE:
6356 opcode = FETCH_OP_SAMPLE_C;
6357 break;
6358 case FETCH_OP_SAMPLE_L:
6359 opcode = FETCH_OP_SAMPLE_C_L;
6360 break;
6361 case FETCH_OP_SAMPLE_LB:
6362 opcode = FETCH_OP_SAMPLE_C_LB;
6363 break;
6364 case FETCH_OP_SAMPLE_G:
6365 opcode = FETCH_OP_SAMPLE_C_G;
6366 break;
6367 /* Texture gather variants */
6368 case FETCH_OP_GATHER4:
6369 opcode = FETCH_OP_GATHER4_C;
6370 break;
6371 case FETCH_OP_GATHER4_O:
6372 opcode = FETCH_OP_GATHER4_C_O;
6373 break;
6374 }
6375 }
6376
6377 memset(&tex, 0, sizeof(struct r600_bytecode_tex));
6378 tex.op = opcode;
6379
6380 tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
6381 tex.sampler_index_mode = sampler_index_mode;
6382 tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
6383 tex.resource_index_mode = sampler_index_mode;
6384 tex.src_gpr = src_gpr;
6385 tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
6386
6387 if (inst->Instruction.Opcode == TGSI_OPCODE_DDX_FINE ||
6388 inst->Instruction.Opcode == TGSI_OPCODE_DDY_FINE) {
6389 tex.inst_mod = 1; /* per pixel gradient calculation instead of per 2x2 quad */
6390 }
6391
6392 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
6393 int8_t texture_component_select = ctx->literals[4 * inst->Src[1].Register.Index + inst->Src[1].Register.SwizzleX];
6394 tex.inst_mod = texture_component_select;
6395
6396 if (ctx->bc->chip_class == CAYMAN) {
6397 /* GATHER4 result order is different from TGSI TG4 */
6398 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
6399 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
6400 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
6401 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6402 } else {
6403 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6404 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6405 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6406 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6407 }
6408 }
6409 else if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ) {
6410 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6411 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6412 tex.dst_sel_z = 7;
6413 tex.dst_sel_w = 7;
6414 }
6415 else if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
6416 tex.dst_sel_x = 3;
6417 tex.dst_sel_y = 7;
6418 tex.dst_sel_z = 7;
6419 tex.dst_sel_w = 7;
6420 }
6421 else {
6422 tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
6423 tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
6424 tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
6425 tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
6426 }
6427
6428
6429 if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ||
6430 inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
6431 tex.src_sel_x = 4;
6432 tex.src_sel_y = 4;
6433 tex.src_sel_z = 4;
6434 tex.src_sel_w = 4;
6435 } else if (src_loaded) {
6436 tex.src_sel_x = 0;
6437 tex.src_sel_y = 1;
6438 tex.src_sel_z = 2;
6439 tex.src_sel_w = 3;
6440 } else {
6441 tex.src_sel_x = ctx->src[0].swizzle[0];
6442 tex.src_sel_y = ctx->src[0].swizzle[1];
6443 tex.src_sel_z = ctx->src[0].swizzle[2];
6444 tex.src_sel_w = ctx->src[0].swizzle[3];
6445 tex.src_rel = ctx->src[0].rel;
6446 }
6447
6448 if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
6449 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
6450 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6451 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
6452 tex.src_sel_x = 1;
6453 tex.src_sel_y = 0;
6454 tex.src_sel_z = 3;
6455 tex.src_sel_w = 2; /* route Z compare or Lod value into W */
6456 }
6457
6458 if (inst->Texture.Texture != TGSI_TEXTURE_RECT &&
6459 inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) {
6460 tex.coord_type_x = 1;
6461 tex.coord_type_y = 1;
6462 }
6463 tex.coord_type_z = 1;
6464 tex.coord_type_w = 1;
6465
6466 tex.offset_x = offset_x;
6467 tex.offset_y = offset_y;
6468 if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
6469 (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6470 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)) {
6471 tex.offset_z = 0;
6472 }
6473 else {
6474 tex.offset_z = offset_z;
6475 }
6476
6477 /* Put the depth for comparison in W.
6478 * TGSI_TEXTURE_SHADOW2D_ARRAY already has the depth in W.
6479 * Some instructions expect the depth in Z. */
6480 if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
6481 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
6482 inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
6483 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) &&
6484 opcode != FETCH_OP_SAMPLE_C_L &&
6485 opcode != FETCH_OP_SAMPLE_C_LB) {
6486 tex.src_sel_w = tex.src_sel_z;
6487 }
6488
6489 if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
6490 inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
6491 if (opcode == FETCH_OP_SAMPLE_C_L ||
6492 opcode == FETCH_OP_SAMPLE_C_LB) {
6493 /* the array index is read from Y */
6494 tex.coord_type_y = 0;
6495 } else {
6496 /* the array index is read from Z */
6497 tex.coord_type_z = 0;
6498 tex.src_sel_z = tex.src_sel_y;
6499 }
6500 } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
6501 inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
6502 ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
6503 inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
6504 (ctx->bc->chip_class >= EVERGREEN)))
6505 /* the array index is read from Z */
6506 tex.coord_type_z = 0;
6507
6508 /* mask unused source components */
6509 if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
6510 switch (inst->Texture.Texture) {
6511 case TGSI_TEXTURE_2D:
6512 case TGSI_TEXTURE_RECT:
6513 tex.src_sel_z = 7;
6514 tex.src_sel_w = 7;
6515 break;
6516 case TGSI_TEXTURE_1D_ARRAY:
6517 tex.src_sel_y = 7;
6518 tex.src_sel_w = 7;
6519 break;
6520 case TGSI_TEXTURE_1D:
6521 tex.src_sel_y = 7;
6522 tex.src_sel_z = 7;
6523 tex.src_sel_w = 7;
6524 break;
6525 }
6526 }
6527
6528 r = r600_bytecode_add_tex(ctx->bc, &tex);
6529 if (r)
6530 return r;
6531
6532 /* add shadow ambient support - gallium doesn't do it yet */
6533 return 0;
6534 }
6535
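/* LRP: dst = src0 * src1 + (1 - src0) * src2, staged as
 * temp = (1 - src0) * src2 followed by a MULADD; a src0 of exactly 0.5
 * collapses to a single ADD with the divide-by-two output modifier. */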
6536 static int tgsi_lrp(struct r600_shader_ctx *ctx)
6537 {
6538 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6539 struct r600_bytecode_alu alu;
6540 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6541 unsigned i, temp_regs[2];
6542 int r;
6543
6544 	/* optimize the special case src0 == 0.5: just average src1 and src2 */
6545 if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
6546 for (i = 0; i < lasti + 1; i++) {
6547 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6548 continue;
6549
6550 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6551 alu.op = ALU_OP2_ADD;
6552 r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
6553 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6554 alu.omod = 3;
6555 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6556 alu.dst.chan = i;
6557 if (i == lasti) {
6558 alu.last = 1;
6559 }
6560 r = r600_bytecode_add_alu(ctx->bc, &alu);
6561 if (r)
6562 return r;
6563 }
6564 return 0;
6565 }
6566
6567 /* 1 - src0 */
6568 for (i = 0; i < lasti + 1; i++) {
6569 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6570 continue;
6571
6572 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6573 alu.op = ALU_OP2_ADD;
6574 alu.src[0].sel = V_SQ_ALU_SRC_1;
6575 alu.src[0].chan = 0;
6576 r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
6577 r600_bytecode_src_toggle_neg(&alu.src[1]);
6578 alu.dst.sel = ctx->temp_reg;
6579 alu.dst.chan = i;
6580 if (i == lasti) {
6581 alu.last = 1;
6582 }
6583 alu.dst.write = 1;
6584 r = r600_bytecode_add_alu(ctx->bc, &alu);
6585 if (r)
6586 return r;
6587 }
6588
6589 /* (1 - src0) * src2 */
6590 for (i = 0; i < lasti + 1; i++) {
6591 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6592 continue;
6593
6594 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6595 alu.op = ALU_OP2_MUL;
6596 alu.src[0].sel = ctx->temp_reg;
6597 alu.src[0].chan = i;
6598 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6599 alu.dst.sel = ctx->temp_reg;
6600 alu.dst.chan = i;
6601 if (i == lasti) {
6602 alu.last = 1;
6603 }
6604 alu.dst.write = 1;
6605 r = r600_bytecode_add_alu(ctx->bc, &alu);
6606 if (r)
6607 return r;
6608 }
6609
6610 /* src0 * src1 + (1 - src0) * src2 */
6611 if (ctx->src[0].abs)
6612 temp_regs[0] = r600_get_temp(ctx);
6613 else
6614 temp_regs[0] = 0;
6615 if (ctx->src[1].abs)
6616 temp_regs[1] = r600_get_temp(ctx);
6617 else
6618 temp_regs[1] = 0;
6619
6620 for (i = 0; i < lasti + 1; i++) {
6621 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6622 continue;
6623
6624 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6625 alu.op = ALU_OP3_MULADD;
6626 alu.is_op3 = 1;
6627 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6628 if (r)
6629 return r;
6630 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
6631 if (r)
6632 return r;
6633 alu.src[2].sel = ctx->temp_reg;
6634 alu.src[2].chan = i;
6635
6636 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6637 alu.dst.chan = i;
6638 if (i == lasti) {
6639 alu.last = 1;
6640 }
6641 r = r600_bytecode_add_alu(ctx->bc, &alu);
6642 if (r)
6643 return r;
6644 }
6645 return 0;
6646 }
6647
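/* CMP: dst = (src0 < 0.0) ? src1 : src2, expressed with CNDGE by swapping
 * the two result operands (src0 >= 0.0 ? src2 : src1). */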
6648 static int tgsi_cmp(struct r600_shader_ctx *ctx)
6649 {
6650 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6651 struct r600_bytecode_alu alu;
6652 int i, r, j;
6653 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6654 int temp_regs[3];
6655
6656 for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
6657 temp_regs[j] = 0;
6658 if (ctx->src[j].abs)
6659 temp_regs[j] = r600_get_temp(ctx);
6660 }
6661
6662 for (i = 0; i < lasti + 1; i++) {
6663 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6664 continue;
6665
6666 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6667 alu.op = ALU_OP3_CNDGE;
6668 r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
6669 if (r)
6670 return r;
6671 r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
6672 if (r)
6673 return r;
6674 r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
6675 if (r)
6676 return r;
6677 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6678 alu.dst.chan = i;
6679 alu.dst.write = 1;
6680 alu.is_op3 = 1;
6681 if (i == lasti)
6682 alu.last = 1;
6683 r = r600_bytecode_add_alu(ctx->bc, &alu);
6684 if (r)
6685 return r;
6686 }
6687 return 0;
6688 }
6689
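/* UCMP: dst = (src0 != 0) ? src1 : src2, expressed with CNDE_INT by
 * swapping the two result operands (src0 == 0 ? src2 : src1). */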
6690 static int tgsi_ucmp(struct r600_shader_ctx *ctx)
6691 {
6692 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6693 struct r600_bytecode_alu alu;
6694 int i, r;
6695 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
6696
6697 for (i = 0; i < lasti + 1; i++) {
6698 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
6699 continue;
6700
6701 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6702 alu.op = ALU_OP3_CNDE_INT;
6703 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
6704 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
6705 r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
6706 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6707 alu.dst.chan = i;
6708 alu.dst.write = 1;
6709 alu.is_op3 = 1;
6710 if (i == lasti)
6711 alu.last = 1;
6712 r = r600_bytecode_add_alu(ctx->bc, &alu);
6713 if (r)
6714 return r;
6715 }
6716 return 0;
6717 }
6718
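/* XPD (cross product): dst.xyz = src0.yzx * src1.zxy - src0.zxy * src1.yzx,
 * computed as tmp = src0.zxy * src1.yzx followed by a MULADD with -tmp;
 * the fourth channel is built from zero operands. */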
6719 static int tgsi_xpd(struct r600_shader_ctx *ctx)
6720 {
6721 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6722 static const unsigned int src0_swizzle[] = {2, 0, 1};
6723 static const unsigned int src1_swizzle[] = {1, 2, 0};
6724 struct r600_bytecode_alu alu;
6725 uint32_t use_temp = 0;
6726 int i, r;
6727
6728 if (inst->Dst[0].Register.WriteMask != 0xf)
6729 use_temp = 1;
6730
6731 for (i = 0; i < 4; i++) {
6732 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6733 alu.op = ALU_OP2_MUL;
6734 if (i < 3) {
6735 r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
6736 r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]);
6737 } else {
6738 alu.src[0].sel = V_SQ_ALU_SRC_0;
6739 alu.src[0].chan = i;
6740 alu.src[1].sel = V_SQ_ALU_SRC_0;
6741 alu.src[1].chan = i;
6742 }
6743
6744 alu.dst.sel = ctx->temp_reg;
6745 alu.dst.chan = i;
6746 alu.dst.write = 1;
6747
6748 if (i == 3)
6749 alu.last = 1;
6750 r = r600_bytecode_add_alu(ctx->bc, &alu);
6751 if (r)
6752 return r;
6753 }
6754
6755 for (i = 0; i < 4; i++) {
6756 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6757 alu.op = ALU_OP3_MULADD;
6758
6759 if (i < 3) {
6760 r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]);
6761 r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]);
6762 } else {
6763 alu.src[0].sel = V_SQ_ALU_SRC_0;
6764 alu.src[0].chan = i;
6765 alu.src[1].sel = V_SQ_ALU_SRC_0;
6766 alu.src[1].chan = i;
6767 }
6768
6769 alu.src[2].sel = ctx->temp_reg;
6770 alu.src[2].neg = 1;
6771 alu.src[2].chan = i;
6772
6773 if (use_temp)
6774 alu.dst.sel = ctx->temp_reg;
6775 else
6776 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
6777 alu.dst.chan = i;
6778 alu.dst.write = 1;
6779 alu.is_op3 = 1;
6780 if (i == 3)
6781 alu.last = 1;
6782 r = r600_bytecode_add_alu(ctx->bc, &alu);
6783 if (r)
6784 return r;
6785 }
6786 if (use_temp)
6787 return tgsi_helper_copy(ctx, inst);
6788 return 0;
6789 }
6790
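/* EXP: dst = (2^floor(src.x), src.x - floor(src.x), 2^src.x, 1.0).
 * Each enabled channel is staged in temp_reg and copied to the real
 * destination by tgsi_helper_copy at the end. */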
6791 static int tgsi_exp(struct r600_shader_ctx *ctx)
6792 {
6793 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6794 struct r600_bytecode_alu alu;
6795 int r;
6796 int i;
6797
6798 /* result.x = 2^floor(src); */
6799 if (inst->Dst[0].Register.WriteMask & 1) {
6800 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6801
6802 alu.op = ALU_OP1_FLOOR;
6803 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6804
6805 alu.dst.sel = ctx->temp_reg;
6806 alu.dst.chan = 0;
6807 alu.dst.write = 1;
6808 alu.last = 1;
6809 r = r600_bytecode_add_alu(ctx->bc, &alu);
6810 if (r)
6811 return r;
6812
6813 if (ctx->bc->chip_class == CAYMAN) {
6814 for (i = 0; i < 3; i++) {
6815 alu.op = ALU_OP1_EXP_IEEE;
6816 alu.src[0].sel = ctx->temp_reg;
6817 alu.src[0].chan = 0;
6818
6819 alu.dst.sel = ctx->temp_reg;
6820 alu.dst.chan = i;
6821 alu.dst.write = i == 0;
6822 alu.last = i == 2;
6823 r = r600_bytecode_add_alu(ctx->bc, &alu);
6824 if (r)
6825 return r;
6826 }
6827 } else {
6828 alu.op = ALU_OP1_EXP_IEEE;
6829 alu.src[0].sel = ctx->temp_reg;
6830 alu.src[0].chan = 0;
6831
6832 alu.dst.sel = ctx->temp_reg;
6833 alu.dst.chan = 0;
6834 alu.dst.write = 1;
6835 alu.last = 1;
6836 r = r600_bytecode_add_alu(ctx->bc, &alu);
6837 if (r)
6838 return r;
6839 }
6840 }
6841
6842 	/* result.y = src.x - floor(src.x); */
6843 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6844 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6845
6846 alu.op = ALU_OP1_FRACT;
6847 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6848
6849 alu.dst.sel = ctx->temp_reg;
6850 #if 0
6851 		/* debug path: write result.y straight to the destination */
6852 		tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
6854 #endif
6855 alu.dst.write = 1;
6856 alu.dst.chan = 1;
6857
6858 alu.last = 1;
6859
6860 r = r600_bytecode_add_alu(ctx->bc, &alu);
6861 if (r)
6862 return r;
6863 }
6864
6865 	/* result.z = RoughApprox2ToX(src.x); */
6866 if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
6867 if (ctx->bc->chip_class == CAYMAN) {
6868 for (i = 0; i < 3; i++) {
6869 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6870 alu.op = ALU_OP1_EXP_IEEE;
6871 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6872
6873 alu.dst.sel = ctx->temp_reg;
6874 alu.dst.chan = i;
6875 if (i == 2) {
6876 alu.dst.write = 1;
6877 alu.last = 1;
6878 }
6879
6880 r = r600_bytecode_add_alu(ctx->bc, &alu);
6881 if (r)
6882 return r;
6883 }
6884 } else {
6885 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6886 alu.op = ALU_OP1_EXP_IEEE;
6887 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6888
6889 alu.dst.sel = ctx->temp_reg;
6890 alu.dst.write = 1;
6891 alu.dst.chan = 2;
6892
6893 alu.last = 1;
6894
6895 r = r600_bytecode_add_alu(ctx->bc, &alu);
6896 if (r)
6897 return r;
6898 }
6899 }
6900
6901 	/* result.w = 1.0; */
6902 if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
6903 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6904
6905 alu.op = ALU_OP1_MOV;
6906 alu.src[0].sel = V_SQ_ALU_SRC_1;
6907 alu.src[0].chan = 0;
6908
6909 alu.dst.sel = ctx->temp_reg;
6910 alu.dst.chan = 3;
6911 alu.dst.write = 1;
6912 alu.last = 1;
6913 r = r600_bytecode_add_alu(ctx->bc, &alu);
6914 if (r)
6915 return r;
6916 }
6917 return tgsi_helper_copy(ctx, inst);
6918 }
6919
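/* LOG: dst = (floor(log2(|src.x|)), |src.x| / 2^floor(log2(|src.x|)),
 * log2(|src.x|), 1.0).  result.y needs the longest chain: log2, floor,
 * exp2 and a reciprocal before the final multiply by |src.x|. */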
6920 static int tgsi_log(struct r600_shader_ctx *ctx)
6921 {
6922 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
6923 struct r600_bytecode_alu alu;
6924 int r;
6925 int i;
6926
6927 /* result.x = floor(log2(|src|)); */
6928 if (inst->Dst[0].Register.WriteMask & 1) {
6929 if (ctx->bc->chip_class == CAYMAN) {
6930 for (i = 0; i < 3; i++) {
6931 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6932
6933 alu.op = ALU_OP1_LOG_IEEE;
6934 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6935 r600_bytecode_src_set_abs(&alu.src[0]);
6936
6937 alu.dst.sel = ctx->temp_reg;
6938 alu.dst.chan = i;
6939 if (i == 0)
6940 alu.dst.write = 1;
6941 if (i == 2)
6942 alu.last = 1;
6943 r = r600_bytecode_add_alu(ctx->bc, &alu);
6944 if (r)
6945 return r;
6946 }
6947
6948 } else {
6949 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6950
6951 alu.op = ALU_OP1_LOG_IEEE;
6952 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6953 r600_bytecode_src_set_abs(&alu.src[0]);
6954
6955 alu.dst.sel = ctx->temp_reg;
6956 alu.dst.chan = 0;
6957 alu.dst.write = 1;
6958 alu.last = 1;
6959 r = r600_bytecode_add_alu(ctx->bc, &alu);
6960 if (r)
6961 return r;
6962 }
6963 		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6964 		alu.op = ALU_OP1_FLOOR;
6965 		alu.src[0].sel = ctx->temp_reg;
6966 		alu.src[0].chan = 0;
6967
6968 alu.dst.sel = ctx->temp_reg;
6969 alu.dst.chan = 0;
6970 alu.dst.write = 1;
6971 alu.last = 1;
6972
6973 r = r600_bytecode_add_alu(ctx->bc, &alu);
6974 if (r)
6975 return r;
6976 }
6977
6978 /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
6979 if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
6980
6981 if (ctx->bc->chip_class == CAYMAN) {
6982 for (i = 0; i < 3; i++) {
6983 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
6984
6985 alu.op = ALU_OP1_LOG_IEEE;
6986 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
6987 r600_bytecode_src_set_abs(&alu.src[0]);
6988
6989 alu.dst.sel = ctx->temp_reg;
6990 alu.dst.chan = i;
6991 if (i == 1)
6992 alu.dst.write = 1;
6993 if (i == 2)
6994 alu.last = 1;
6995
6996 r = r600_bytecode_add_alu(ctx->bc, &alu);
6997 if (r)
6998 return r;
6999 }
7000 } else {
7001 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7002
7003 alu.op = ALU_OP1_LOG_IEEE;
7004 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7005 r600_bytecode_src_set_abs(&alu.src[0]);
7006
7007 alu.dst.sel = ctx->temp_reg;
7008 alu.dst.chan = 1;
7009 alu.dst.write = 1;
7010 alu.last = 1;
7011
7012 r = r600_bytecode_add_alu(ctx->bc, &alu);
7013 if (r)
7014 return r;
7015 }
7016
7017 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7018
7019 alu.op = ALU_OP1_FLOOR;
7020 alu.src[0].sel = ctx->temp_reg;
7021 alu.src[0].chan = 1;
7022
7023 alu.dst.sel = ctx->temp_reg;
7024 alu.dst.chan = 1;
7025 alu.dst.write = 1;
7026 alu.last = 1;
7027
7028 r = r600_bytecode_add_alu(ctx->bc, &alu);
7029 if (r)
7030 return r;
7031
7032 if (ctx->bc->chip_class == CAYMAN) {
7033 for (i = 0; i < 3; i++) {
7034 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7035 alu.op = ALU_OP1_EXP_IEEE;
7036 alu.src[0].sel = ctx->temp_reg;
7037 alu.src[0].chan = 1;
7038
7039 alu.dst.sel = ctx->temp_reg;
7040 alu.dst.chan = i;
7041 if (i == 1)
7042 alu.dst.write = 1;
7043 if (i == 2)
7044 alu.last = 1;
7045
7046 r = r600_bytecode_add_alu(ctx->bc, &alu);
7047 if (r)
7048 return r;
7049 }
7050 } else {
7051 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7052 alu.op = ALU_OP1_EXP_IEEE;
7053 alu.src[0].sel = ctx->temp_reg;
7054 alu.src[0].chan = 1;
7055
7056 alu.dst.sel = ctx->temp_reg;
7057 alu.dst.chan = 1;
7058 alu.dst.write = 1;
7059 alu.last = 1;
7060
7061 r = r600_bytecode_add_alu(ctx->bc, &alu);
7062 if (r)
7063 return r;
7064 }
7065
7066 if (ctx->bc->chip_class == CAYMAN) {
7067 for (i = 0; i < 3; i++) {
7068 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7069 alu.op = ALU_OP1_RECIP_IEEE;
7070 alu.src[0].sel = ctx->temp_reg;
7071 alu.src[0].chan = 1;
7072
7073 alu.dst.sel = ctx->temp_reg;
7074 alu.dst.chan = i;
7075 if (i == 1)
7076 alu.dst.write = 1;
7077 if (i == 2)
7078 alu.last = 1;
7079
7080 r = r600_bytecode_add_alu(ctx->bc, &alu);
7081 if (r)
7082 return r;
7083 }
7084 } else {
7085 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7086 alu.op = ALU_OP1_RECIP_IEEE;
7087 alu.src[0].sel = ctx->temp_reg;
7088 alu.src[0].chan = 1;
7089
7090 alu.dst.sel = ctx->temp_reg;
7091 alu.dst.chan = 1;
7092 alu.dst.write = 1;
7093 alu.last = 1;
7094
7095 r = r600_bytecode_add_alu(ctx->bc, &alu);
7096 if (r)
7097 return r;
7098 }
7099
7100 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7101
7102 alu.op = ALU_OP2_MUL;
7103
7104 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7105 r600_bytecode_src_set_abs(&alu.src[0]);
7106
7107 alu.src[1].sel = ctx->temp_reg;
7108 alu.src[1].chan = 1;
7109
7110 alu.dst.sel = ctx->temp_reg;
7111 alu.dst.chan = 1;
7112 alu.dst.write = 1;
7113 alu.last = 1;
7114
7115 r = r600_bytecode_add_alu(ctx->bc, &alu);
7116 if (r)
7117 return r;
7118 }
7119
7120 	/* result.z = log2(|src|); */
7121 if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
7122 if (ctx->bc->chip_class == CAYMAN) {
7123 for (i = 0; i < 3; i++) {
7124 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7125
7126 alu.op = ALU_OP1_LOG_IEEE;
7127 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7128 r600_bytecode_src_set_abs(&alu.src[0]);
7129
7130 alu.dst.sel = ctx->temp_reg;
7131 if (i == 2)
7132 alu.dst.write = 1;
7133 alu.dst.chan = i;
7134 if (i == 2)
7135 alu.last = 1;
7136
7137 r = r600_bytecode_add_alu(ctx->bc, &alu);
7138 if (r)
7139 return r;
7140 }
7141 } else {
7142 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7143
7144 alu.op = ALU_OP1_LOG_IEEE;
7145 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7146 r600_bytecode_src_set_abs(&alu.src[0]);
7147
7148 alu.dst.sel = ctx->temp_reg;
7149 alu.dst.write = 1;
7150 alu.dst.chan = 2;
7151 alu.last = 1;
7152
7153 r = r600_bytecode_add_alu(ctx->bc, &alu);
7154 if (r)
7155 return r;
7156 }
7157 }
7158
7159 /* result.w = 1.0; */
7160 if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
7161 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7162
7163 alu.op = ALU_OP1_MOV;
7164 alu.src[0].sel = V_SQ_ALU_SRC_1;
7165 alu.src[0].chan = 0;
7166
7167 alu.dst.sel = ctx->temp_reg;
7168 alu.dst.chan = 3;
7169 alu.dst.write = 1;
7170 alu.last = 1;
7171
7172 r = r600_bytecode_add_alu(ctx->bc, &alu);
7173 if (r)
7174 return r;
7175 }
7176
7177 return tgsi_helper_copy(ctx, inst);
7178 }
7179
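/* Address register loads (evergreen+): ARL uses FLT_TO_INT_FLOOR, ARR plain
 * FLT_TO_INT, UARL a MOV of an already-integer source.  Dst index 0 targets
 * the AR backing register, 1..2 the extra index registers; the loaded flag
 * is cleared so the next indirect access re-copies the value into AR or
 * CF_INDEX. */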
7180 static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
7181 {
7182 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7183 struct r600_bytecode_alu alu;
7184 int r;
7185 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7186 unsigned reg = inst->Dst[0].Register.Index > 0 ? ctx->bc->index_reg[inst->Dst[0].Register.Index - 1] : ctx->bc->ar_reg;
7187
7188 assert(inst->Dst[0].Register.Index < 3);
7189 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7190
7191 switch (inst->Instruction.Opcode) {
7192 case TGSI_OPCODE_ARL:
7193 alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
7194 break;
7195 case TGSI_OPCODE_ARR:
7196 alu.op = ALU_OP1_FLT_TO_INT;
7197 break;
7198 case TGSI_OPCODE_UARL:
7199 alu.op = ALU_OP1_MOV;
7200 break;
7201 default:
7202 assert(0);
7203 return -1;
7204 }
7205
7206 for (i = 0; i <= lasti; ++i) {
7207 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7208 continue;
7209 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7210 alu.last = i == lasti;
7211 alu.dst.sel = reg;
7212 alu.dst.chan = i;
7213 alu.dst.write = 1;
7214 r = r600_bytecode_add_alu(ctx->bc, &alu);
7215 if (r)
7216 return r;
7217 }
7218
7219 if (inst->Dst[0].Register.Index > 0)
7220 ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
7221 else
7222 ctx->bc->ar_loaded = 0;
7223
7224 return 0;
7225 }
7226 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
7227 {
7228 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7229 struct r600_bytecode_alu alu;
7230 int r;
7231 int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7232
7233 switch (inst->Instruction.Opcode) {
7234 case TGSI_OPCODE_ARL:
7235 memset(&alu, 0, sizeof(alu));
7236 alu.op = ALU_OP1_FLOOR;
7237 alu.dst.sel = ctx->bc->ar_reg;
7238 alu.dst.write = 1;
7239 for (i = 0; i <= lasti; ++i) {
7240 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7241 alu.dst.chan = i;
7242 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7243 alu.last = i == lasti;
7244 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7245 return r;
7246 }
7247 }
7248
7249 memset(&alu, 0, sizeof(alu));
7250 alu.op = ALU_OP1_FLT_TO_INT;
7251 alu.src[0].sel = ctx->bc->ar_reg;
7252 alu.dst.sel = ctx->bc->ar_reg;
7253 alu.dst.write = 1;
7254 /* FLT_TO_INT is trans-only on r600/r700 */
7255 alu.last = TRUE;
7256 for (i = 0; i <= lasti; ++i) {
7257 alu.dst.chan = i;
7258 alu.src[0].chan = i;
7259 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7260 return r;
7261 }
7262 break;
7263 case TGSI_OPCODE_ARR:
7264 memset(&alu, 0, sizeof(alu));
7265 alu.op = ALU_OP1_FLT_TO_INT;
7266 alu.dst.sel = ctx->bc->ar_reg;
7267 alu.dst.write = 1;
7268 /* FLT_TO_INT is trans-only on r600/r700 */
7269 alu.last = TRUE;
7270 for (i = 0; i <= lasti; ++i) {
7271 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7272 alu.dst.chan = i;
7273 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7274 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7275 return r;
7276 }
7277 }
7278 break;
7279 case TGSI_OPCODE_UARL:
7280 memset(&alu, 0, sizeof(alu));
7281 alu.op = ALU_OP1_MOV;
7282 alu.dst.sel = ctx->bc->ar_reg;
7283 alu.dst.write = 1;
7284 for (i = 0; i <= lasti; ++i) {
7285 if (inst->Dst[0].Register.WriteMask & (1 << i)) {
7286 alu.dst.chan = i;
7287 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7288 alu.last = i == lasti;
7289 if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
7290 return r;
7291 }
7292 }
7293 break;
7294 default:
7295 assert(0);
7296 return -1;
7297 }
7298
7299 ctx->bc->ar_loaded = 0;
7300 return 0;
7301 }
7302
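/* DST: dst = (1.0, src0.y * src1.y, src0.z, src1.w), done with one MUL per
 * channel and the constant 1.0 substituted for each unused operand. */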
7303 static int tgsi_opdst(struct r600_shader_ctx *ctx)
7304 {
7305 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7306 struct r600_bytecode_alu alu;
7307 int i, r = 0;
7308
7309 for (i = 0; i < 4; i++) {
7310 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7311
7312 alu.op = ALU_OP2_MUL;
7313 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7314
7315 if (i == 0 || i == 3) {
7316 alu.src[0].sel = V_SQ_ALU_SRC_1;
7317 } else {
7318 r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
7319 }
7320
7321 if (i == 0 || i == 2) {
7322 alu.src[1].sel = V_SQ_ALU_SRC_1;
7323 } else {
7324 r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
7325 }
7326 if (i == 3)
7327 alu.last = 1;
7328 r = r600_bytecode_add_alu(ctx->bc, &alu);
7329 if (r)
7330 return r;
7331 }
7332 return 0;
7333 }
7334
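/* Emit the predicate-setting ALU op for an IF: compare src0.x against 0,
 * updating both the execute mask and the predicate, as the last instruction
 * of the clause (normally ALU_PUSH_BEFORE, see emit_if). */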
7335 static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
7336 {
7337 struct r600_bytecode_alu alu;
7338 int r;
7339
7340 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7341 alu.op = opcode;
7342 alu.execute_mask = 1;
7343 alu.update_pred = 1;
7344
7345 alu.dst.sel = ctx->temp_reg;
7346 alu.dst.write = 1;
7347 alu.dst.chan = 0;
7348
7349 r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
7350 alu.src[1].sel = V_SQ_ALU_SRC_0;
7351 alu.src[1].chan = 0;
7352
7353 alu.last = 1;
7354
7355 r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
7356 if (r)
7357 return r;
7358 return 0;
7359 }
7360
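/* Emit 'pops' stack pops.  Where possible this is folded into the preceding
 * ALU clause by turning it into ALU_POP_AFTER (one pop) or ALU_POP2_AFTER
 * (two pops); otherwise an explicit POP CF instruction is added. */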
7361 static int pops(struct r600_shader_ctx *ctx, int pops)
7362 {
7363 unsigned force_pop = ctx->bc->force_add_cf;
7364
7365 if (!force_pop) {
7366 int alu_pop = 3;
7367 if (ctx->bc->cf_last) {
7368 if (ctx->bc->cf_last->op == CF_OP_ALU)
7369 alu_pop = 0;
7370 else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
7371 alu_pop = 1;
7372 }
7373 alu_pop += pops;
7374 if (alu_pop == 1) {
7375 ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
7376 ctx->bc->force_add_cf = 1;
7377 } else if (alu_pop == 2) {
7378 ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
7379 ctx->bc->force_add_cf = 1;
7380 } else {
7381 force_pop = 1;
7382 }
7383 }
7384
7385 if (force_pop) {
7386 r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
7387 ctx->bc->cf_last->pop_count = pops;
7388 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7389 }
7390
7391 return 0;
7392 }
7393
7394 static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
7395 unsigned reason)
7396 {
7397 struct r600_stack_info *stack = &ctx->bc->stack;
7398 unsigned elements, entries;
7399
7400 unsigned entry_size = stack->entry_size;
7401
7402 elements = (stack->loop + stack->push_wqm ) * entry_size;
7403 elements += stack->push;
7404
7405 switch (ctx->bc->chip_class) {
7406 case R600:
7407 case R700:
7408 /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
7409 * the stack must be reserved to hold the current active/continue
7410 * masks */
7411 if (reason == FC_PUSH_VPM) {
7412 elements += 2;
7413 }
7414 break;
7415
7416 case CAYMAN:
7417 /* r9xx: any stack operation on empty stack consumes 2 additional
7418 * elements */
7419 elements += 2;
7420
7421 /* fallthrough */
7422 /* FIXME: do the two elements added above cover the cases for the
7423 * r8xx+ below? */
7424
7425 case EVERGREEN:
7426 /* r8xx+: 2 extra elements are not always required, but one extra
7427 * element must be added for each of the following cases:
7428 * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
7429 * stack usage.
7430 * (Currently we don't use ALU_ELSE_AFTER.)
7431 * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
7432 * PUSH instruction executed.
7433 *
7434 * NOTE: it seems we also need to reserve additional element in some
7435 * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
7436 * then STACK_SIZE should be 2 instead of 1 */
7437 if (reason == FC_PUSH_VPM) {
7438 elements += 1;
7439 }
7440 break;
7441
7442 default:
7443 assert(0);
7444 break;
7445 }
7446
7447 /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
7448 * for all chips, so we use 4 in the final formula, not the real entry_size
7449 * for the chip */
7450 entry_size = 4;
7451
7452 entries = (elements + (entry_size - 1)) / entry_size;
7453
7454 if (entries > stack->max_entries)
7455 stack->max_entries = entries;
7456 }
7457
7458 static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
7459 {
7460 switch(reason) {
7461 case FC_PUSH_VPM:
7462 --ctx->bc->stack.push;
7463 assert(ctx->bc->stack.push >= 0);
7464 break;
7465 case FC_PUSH_WQM:
7466 --ctx->bc->stack.push_wqm;
7467 assert(ctx->bc->stack.push_wqm >= 0);
7468 break;
7469 case FC_LOOP:
7470 --ctx->bc->stack.loop;
7471 assert(ctx->bc->stack.loop >= 0);
7472 break;
7473 default:
7474 assert(0);
7475 break;
7476 }
7477 }
7478
7479 static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
7480 {
7481 switch (reason) {
7482 case FC_PUSH_VPM:
7483 ++ctx->bc->stack.push;
7484 break;
7485 case FC_PUSH_WQM:
7486 		++ctx->bc->stack.push_wqm; break;
7487 case FC_LOOP:
7488 ++ctx->bc->stack.loop;
7489 break;
7490 default:
7491 assert(0);
7492 }
7493
7494 callstack_update_max_depth(ctx, reason);
7495 }
7496
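/* Flow-control bookkeeping: each fc_stack entry records the CF instruction
 * that opened the block plus a list of 'mid' instructions (ELSE, BREAK,
 * CONT) whose jump targets get fixed up when the matching ENDIF/ENDLOOP is
 * emitted. */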
7497 static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
7498 {
7499 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
7500
7501 sp->mid = realloc((void *)sp->mid,
7502 sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
7503 sp->mid[sp->num_mid] = ctx->bc->cf_last;
7504 sp->num_mid++;
7505 }
7506
7507 static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
7508 {
7509 ctx->bc->fc_sp++;
7510 ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
7511 ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
7512 }
7513
7514 static void fc_poplevel(struct r600_shader_ctx *ctx)
7515 {
7516 struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
7517 free(sp->mid);
7518 sp->mid = NULL;
7519 sp->num_mid = 0;
7520 sp->start = NULL;
7521 sp->type = 0;
7522 ctx->bc->fc_sp--;
7523 }
7524
7525 #if 0
7526 static int emit_return(struct r600_shader_ctx *ctx)
7527 {
7528 	r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN);
7529 return 0;
7530 }
7531
7532 static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
7533 {
7534
7535 	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
7536 ctx->bc->cf_last->pop_count = pops;
7537 /* XXX work out offset */
7538 return 0;
7539 }
7540
7541 static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
7542 {
7543 return 0;
7544 }
7545
7546 static void emit_testflag(struct r600_shader_ctx *ctx)
7547 {
7548
7549 }
7550
7551 static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
7552 {
7553 emit_testflag(ctx);
7554 emit_jump_to_offset(ctx, 1, 4);
7555 emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
7556 pops(ctx, ifidx + 1);
7557 emit_return(ctx);
7558 }
7559
7560 static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
7561 {
7562 emit_testflag(ctx);
7563
7564 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7565 ctx->bc->cf_last->pop_count = 1;
7566
7567 fc_set_mid(ctx, fc_sp);
7568
7569 pops(ctx, 1);
7570 }
7571 #endif
7572
7573 static int emit_if(struct r600_shader_ctx *ctx, int opcode)
7574 {
7575 int alu_type = CF_OP_ALU_PUSH_BEFORE;
7576
7577 /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
7578 * LOOP_STARTxxx for nested loops may put the branch stack into a state
7579 	 * such that ALU_PUSH_BEFORE doesn't work as expected. Work around this
7580 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
7581 if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
7582 r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
7583 ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
7584 alu_type = CF_OP_ALU;
7585 }
7586
7587 emit_logic_pred(ctx, opcode, alu_type);
7588
7589 r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
7590
7591 fc_pushlevel(ctx, FC_IF);
7592
7593 callstack_push(ctx, FC_PUSH_VPM);
7594 return 0;
7595 }
7596
7597 static int tgsi_if(struct r600_shader_ctx *ctx)
7598 {
7599 return emit_if(ctx, ALU_OP2_PRED_SETNE);
7600 }
7601
7602 static int tgsi_uif(struct r600_shader_ctx *ctx)
7603 {
7604 return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
7605 }
7606
7607 static int tgsi_else(struct r600_shader_ctx *ctx)
7608 {
7609 r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
7610 ctx->bc->cf_last->pop_count = 1;
7611
7612 fc_set_mid(ctx, ctx->bc->fc_sp);
7613 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
7614 return 0;
7615 }
7616
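/* ENDIF: pop one stack level and patch the pending jumps: with no ELSE the
 * IF's JUMP skips past the POP; with an ELSE the JUMP already targets it
 * and the ELSE itself is patched here instead. */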
7617 static int tgsi_endif(struct r600_shader_ctx *ctx)
7618 {
7619 pops(ctx, 1);
7620 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
7621 R600_ERR("if/endif unbalanced in shader\n");
7622 return -1;
7623 }
7624
7625 if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
7626 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7627 ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
7628 } else {
7629 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
7630 }
7631 fc_poplevel(ctx);
7632
7633 callstack_pop(ctx, FC_PUSH_VPM);
7634 return 0;
7635 }
7636
7637 static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
7638 {
7639 /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
7640 * limited to 4096 iterations, like the other LOOP_* instructions. */
7641 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
7642
7643 fc_pushlevel(ctx, FC_LOOP);
7644
7645 /* check stack depth */
7646 callstack_push(ctx, FC_LOOP);
7647 return 0;
7648 }
7649
7650 static int tgsi_endloop(struct r600_shader_ctx *ctx)
7651 {
7652 int i;
7653
7654 r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
7655
7656 if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
7657 R600_ERR("loop/endloop in shader code are not paired.\n");
7658 return -EINVAL;
7659 }
7660
7661 	/* fixup loop pointers - from r600isa:
7662 	   LOOP END points to CF after LOOP START,
7663 	   LOOP START points to CF after LOOP END,
7664 	   BRK/CONT point to LOOP END CF
7665 	*/
7666 ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
7667
7668 ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
7669
7670 for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
7671 ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
7672 }
7673 /* XXX add LOOPRET support */
7674 fc_poplevel(ctx);
7675 callstack_pop(ctx, FC_LOOP);
7676 return 0;
7677 }
7678
7679 static int tgsi_loop_breakc(struct r600_shader_ctx *ctx)
7680 {
7681 int r;
7682 unsigned int fscp;
7683
7684 for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
7685 {
7686 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
7687 break;
7688 }
7689 if (fscp == 0) {
7690 R600_ERR("BREAKC not inside loop/endloop pair\n");
7691 return -EINVAL;
7692 }
7693
7694 if (ctx->bc->chip_class == EVERGREEN &&
7695 ctx->bc->family != CHIP_CYPRESS &&
7696 ctx->bc->family != CHIP_JUNIPER) {
7697 /* HW bug: ALU_BREAK does not save the active mask correctly */
7698 r = tgsi_uif(ctx);
7699 if (r)
7700 return r;
7701
7702 r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_BREAK);
7703 if (r)
7704 return r;
7705 fc_set_mid(ctx, fscp);
7706
7707 return tgsi_endif(ctx);
7708 } else {
7709 r = emit_logic_pred(ctx, ALU_OP2_PRED_SETE_INT, CF_OP_ALU_BREAK);
7710 if (r)
7711 return r;
7712 fc_set_mid(ctx, fscp);
7713 }
7714
7715 return 0;
7716 }
7717
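/* BRK/CONT: find the innermost enclosing loop on the flow-control
 * stack and register this CF with it, so that tgsi_endloop can patch
 * its jump target to the LOOP_END. */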
7718 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
7719 {
7720 unsigned int fscp;
7721
7722 	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--) {
7724 if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
7725 break;
7726 }
7727
7728 if (fscp == 0) {
7729 R600_ERR("Break not inside loop/endloop pair\n");
7730 return -EINVAL;
7731 }
7732
7733 r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7734
7735 fc_set_mid(ctx, fscp);
7736
7737 return 0;
7738 }
7739
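/* EMIT/ENDPRIM for geometry shaders: the target stream comes from an
 * immediate operand, and EMIT_VERTEX first flushes the shader outputs
 * to the GS ring before the CF instruction is emitted. */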
7740 static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
7741 {
7742 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7743 int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
7744 int r;
7745
7746 if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
7747 emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
7748
7749 r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
7750 if (!r)
7751 		ctx->bc->cf_last->count = stream; /* the count field of CUT/EMIT_VERTEX selects the stream */
7752 return r;
7753 }
7754
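/* UMAD has no single ALU opcode, so lower it to MULLO_UINT followed by
 * ADD_INT; on Cayman the multiply is issued across all four vector
 * slots with only the matching channel written back. */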
7755 static int tgsi_umad(struct r600_shader_ctx *ctx)
7756 {
7757 struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
7758 struct r600_bytecode_alu alu;
7759 int i, j, k, r;
7760 int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
7761
7762 /* src0 * src1 */
7763 for (i = 0; i < lasti + 1; i++) {
7764 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7765 continue;
7766
7767 if (ctx->bc->chip_class == CAYMAN) {
7768 			for (j = 0; j < 4; j++) {
7769 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7770
7771 alu.op = ALU_OP2_MULLO_UINT;
7772 for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
7773 r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
7774 }
7775 alu.dst.chan = j;
7776 alu.dst.sel = ctx->temp_reg;
7777 alu.dst.write = (j == i);
7778 if (j == 3)
7779 alu.last = 1;
7780 r = r600_bytecode_add_alu(ctx->bc, &alu);
7781 if (r)
7782 return r;
7783 }
7784 } else {
7785 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7786
7787 alu.dst.chan = i;
7788 alu.dst.sel = ctx->temp_reg;
7789 alu.dst.write = 1;
7790
7791 alu.op = ALU_OP2_MULLO_UINT;
7792 for (j = 0; j < 2; j++) {
7793 r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
7794 }
7795
7796 alu.last = 1;
7797 r = r600_bytecode_add_alu(ctx->bc, &alu);
7798 if (r)
7799 return r;
7800 }
7801 }
7803
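	/* add src2 to the product */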
7804 for (i = 0; i < lasti + 1; i++) {
7805 if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
7806 continue;
7807
7808 memset(&alu, 0, sizeof(struct r600_bytecode_alu));
7809 tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
7810
7811 alu.op = ALU_OP2_ADD_INT;
7812
7813 alu.src[0].sel = ctx->temp_reg;
7814 alu.src[0].chan = i;
7815
7816 r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
7817 if (i == lasti) {
7818 alu.last = 1;
7819 }
7820 r = r600_bytecode_add_alu(ctx->bc, &alu);
7821 if (r)
7822 return r;
7823 }
7824 return 0;
7825 }
7826
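/* Per-chip dispatch tables, designated-initialized by TGSI opcode:
 * each entry pairs a hardware opcode with the emit callback that
 * lowers the instruction. Opcodes a generation cannot (or does not
 * yet) handle map to tgsi_unsupported. */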
7827 static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
7828 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_r600_arl},
7829 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
7830 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
7831
7832 /* XXX:
7833 * For state trackers other than OpenGL, we'll want to use
7834 * _RECIP_IEEE instead.
7835 */
7836 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},
7837
7838 [TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
7839 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
7840 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
7841 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
7842 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
7843 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
7844 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
7845 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
7846 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
7847 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
7848 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
7849 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
7850 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
7851 [TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
7852 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
7853 [TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
7854 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
7855 [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
7856 [22] = { ALU_OP0_NOP, tgsi_unsupported},
7857 [23] = { ALU_OP0_NOP, tgsi_unsupported},
7858 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
7859 [TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
7860 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
7861 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
7862 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
7863 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
7864 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
7865 [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
7866 [32] = { ALU_OP0_NOP, tgsi_unsupported},
7867 [TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
7868 [34] = { ALU_OP0_NOP, tgsi_unsupported},
7869 [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
7870 [TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
7871 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
7872 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
7873 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
7874 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
7875 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
7876 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
7877 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
7878 [44] = { ALU_OP0_NOP, tgsi_unsupported},
7879 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
7880 [46] = { ALU_OP0_NOP, tgsi_unsupported},
7881 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
7882 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
7883 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
7884 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
7885 [51] = { ALU_OP0_NOP, tgsi_unsupported},
7886 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
7887 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
7888 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
7889 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
7890 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
7891 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
7892 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
7893 [59] = { ALU_OP0_NOP, tgsi_unsupported},
7894 [60] = { ALU_OP0_NOP, tgsi_unsupported},
7895 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_r600_arl},
7896 [62] = { ALU_OP0_NOP, tgsi_unsupported},
7897 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
7898 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
7899 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
7900 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
7901 [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
7902 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
7903 [69] = { ALU_OP0_NOP, tgsi_unsupported},
7904 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
7905 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
7906 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
7907 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
7908 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
7909 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
7910 [76] = { ALU_OP0_NOP, tgsi_unsupported},
7911 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
7912 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
7913 [TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
7914 [TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
7915 [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
7916 [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
7917 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
7918 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
7919 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
7920 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
7921 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2_trans},
7922 [88] = { ALU_OP0_NOP, tgsi_unsupported},
7923 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
7924 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
7925 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
7926 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
7927 [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
7928 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
7929 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
7930 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
7931 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
7932 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
7933 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
7934 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
7935 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
7936 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
7937 [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
7938 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
7939 [105] = { ALU_OP0_NOP, tgsi_unsupported},
7940 [106] = { ALU_OP0_NOP, tgsi_unsupported},
7941 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
7942 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
7943 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
7944 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
7945 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
7946 [112] = { ALU_OP0_NOP, tgsi_unsupported},
7947 [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
7948 [114] = { ALU_OP0_NOP, tgsi_unsupported},
7949 [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_loop_breakc},
7950 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
7951 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
7952 [118] = { ALU_OP0_NOP, tgsi_unsupported},
7953 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2_trans},
7954 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
7955 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
7956 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
7957 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
7958 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
7959 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2_trans},
7960 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
7961 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2_trans},
7962 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
7963 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
7964 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
7965 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
7966 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
7967 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
7968 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
7969 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
7970 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
7971 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
7972 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2_trans},
7973 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
7974 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2_swap},
7975 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
7976 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
7977 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
7978 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
7979 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
7980 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
7981 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
7982 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
7983 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
7984 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
7985 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
7986 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
7987 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
7988 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
7989 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
7990 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
7991 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_r600_arl},
7992 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
7993 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
7994 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
7995 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
7996 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
7997 [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
7998 [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
7999 [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8000 [TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
8001 [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
8002 [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
8003 [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
8004 [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
8005 [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
8006 [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
8007 [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8008 [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8009 [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8010 [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8011 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
8012 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8013 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8014 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
8015 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
8016 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_unsupported},
8017 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_unsupported},
8018 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_unsupported},
8019 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_unsupported},
8020 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_unsupported},
8021 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_unsupported},
8022 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_unsupported},
8023 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_unsupported},
8024 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_unsupported},
8025 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_unsupported},
8026 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_unsupported},
8027 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_unsupported},
8028 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_unsupported},
8029 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
8030 };
8031
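/* Evergreen differs from r600 mainly in what runs natively: the shift
 * ops no longer need the trans slot, TG4/LODQ/BFE/BFI/BREV/POPC/LSB/MSB
 * and the INTERP_* opcodes are wired up, and the FP64 (D*) opcodes are
 * implemented, while BREAKC drops to tgsi_unsupported. */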
8032 static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
8033 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
8034 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
8035 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
8036 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
8037 [TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
8038 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
8039 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
8040 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
8041 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
8042 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
8043 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
8044 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
8045 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
8046 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
8047 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
8048 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
8049 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
8050 [TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
8051 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
8052 [TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
8053 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
8054 [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
8055 [22] = { ALU_OP0_NOP, tgsi_unsupported},
8056 [23] = { ALU_OP0_NOP, tgsi_unsupported},
8057 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
8058 [TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
8059 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
8060 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
8061 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
8062 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
8063 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
8064 [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
8065 [32] = { ALU_OP0_NOP, tgsi_unsupported},
8066 [TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
8067 [34] = { ALU_OP0_NOP, tgsi_unsupported},
8068 [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
8069 [TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
8070 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8071 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8072 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
8073 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
8074 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
8075 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
8076 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8077 [44] = { ALU_OP0_NOP, tgsi_unsupported},
8078 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
8079 [46] = { ALU_OP0_NOP, tgsi_unsupported},
8080 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
8081 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, tgsi_trig},
8082 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
8083 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
8084 [51] = { ALU_OP0_NOP, tgsi_unsupported},
8085 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
8086 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
8087 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
8088 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
8089 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
8090 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
8091 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8092 [59] = { ALU_OP0_NOP, tgsi_unsupported},
8093 [60] = { ALU_OP0_NOP, tgsi_unsupported},
8094 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
8095 [62] = { ALU_OP0_NOP, tgsi_unsupported},
8096 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
8097 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
8098 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
8099 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
8100 [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
8101 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8102 [69] = { ALU_OP0_NOP, tgsi_unsupported},
8103 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
8104 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
8105 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8106 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
8107 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
8108 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
8109 [76] = { ALU_OP0_NOP, tgsi_unsupported},
8110 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
8111 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
8112 [TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8113 [TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8114 [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
8115 [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
8116 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
8117 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
8118 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
8119 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
8120 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
8121 [88] = { ALU_OP0_NOP, tgsi_unsupported},
8122 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
8123 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
8124 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
8125 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
8126 [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
8127 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
8128 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8129 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
8130 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
8131 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
8132 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
8133 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8134 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
8135 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8136 [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8137 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
8138 [105] = { ALU_OP0_NOP, tgsi_unsupported},
8139 [106] = { ALU_OP0_NOP, tgsi_unsupported},
8140 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
8141 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
8142 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
8143 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
8144 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
8145 [112] = { ALU_OP0_NOP, tgsi_unsupported},
8146 [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
8147 [114] = { ALU_OP0_NOP, tgsi_unsupported},
8148 [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
8149 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
8150 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
8151 [118] = { ALU_OP0_NOP, tgsi_unsupported},
8152 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_f2i},
8153 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
8154 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
8155 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
8156 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
8157 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
8158 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
8159 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
8160 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_f2i},
8161 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2_trans},
8162 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
8163 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
8164 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
8165 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
8166 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
8167 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
8168 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_UINT, tgsi_op2_trans},
8169 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
8170 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
8171 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
8172 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
8173 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
8174 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8175 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
8176 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
8177 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8178 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
8179 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
8180 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
8181 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
8182 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
8183 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
8184 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
8185 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
8186 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
8187 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
8188 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
8189 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
8190 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
8191 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
8192 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
8193 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
8194 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
8195 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
8196 [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8197 [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8198 [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8199 [TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
8200 [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
8201 [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
8202 [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
8203 [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
8204 [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
8205 [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
8206 [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8207 [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8208 [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8209 [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8210 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
8211 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8212 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8213 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, tgsi_op2_trans},
8214 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
8215 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
8216 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
8217 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
8218 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
8219 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
8220 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
8221 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
8222 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
8223 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
8224 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
8225 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
8226 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
8227 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
8228 [TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
8229 [TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
8230 [TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
8231 [TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
8232 [TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
8233 [TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
8234 [TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
8235 [TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
8236 [TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
8237 [TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
8238 [TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
8239 [TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
8240 [TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
8241 [TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
8242 [TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
8243 [TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
8244 [TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
8245 [TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
8246 [TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
8247 [TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
8248 [TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
8249 [TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
8250 [TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
8251 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
8252 };
8253
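/* Cayman runs the transcendental ops on the vector units, so
 * RCP/RSQ/SQRT/EX2/LG2 go through cayman_emit_float_instr, SIN/COS
 * through cayman_trig, POW through cayman_pow and the 32x32 multiplies
 * through cayman_mul_int_instr; the rest largely matches the
 * Evergreen table. */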
8254 static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
8255 [TGSI_OPCODE_ARL] = { ALU_OP0_NOP, tgsi_eg_arl},
8256 [TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
8257 [TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
8258 [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, cayman_emit_float_instr},
8259 [TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
8260 [TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
8261 [TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
8262 [TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
8263 [TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
8264 [TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
8265 [TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
8266 [TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
8267 [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
8268 [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
8269 [TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
8270 [TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
8271 [TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
8272 [TGSI_OPCODE_SUB] = { ALU_OP2_ADD, tgsi_op2},
8273 [TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
8274 [TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
8275 [TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
8276 [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
8277 [22] = { ALU_OP0_NOP, tgsi_unsupported},
8278 [23] = { ALU_OP0_NOP, tgsi_unsupported},
8279 [TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
8280 [TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
8281 [TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
8282 [TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
8283 [TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
8284 [TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
8285 [TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
8286 [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
8287 [32] = { ALU_OP0_NOP, tgsi_unsupported},
8288 [TGSI_OPCODE_ABS] = { ALU_OP1_MOV, tgsi_op2},
8289 [34] = { ALU_OP0_NOP, tgsi_unsupported},
8290 [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
8291 [TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
8292 [TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8293 [TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8294 [TGSI_OPCODE_KILL] = { ALU_OP2_KILLGT, tgsi_kill}, /* unconditional kill */
8295 [TGSI_OPCODE_PK2H] = { ALU_OP0_NOP, tgsi_unsupported},
8296 [TGSI_OPCODE_PK2US] = { ALU_OP0_NOP, tgsi_unsupported},
8297 [TGSI_OPCODE_PK4B] = { ALU_OP0_NOP, tgsi_unsupported},
8298 [TGSI_OPCODE_PK4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8299 [44] = { ALU_OP0_NOP, tgsi_unsupported},
8300 [TGSI_OPCODE_SEQ] = { ALU_OP2_SETE, tgsi_op2},
8301 [46] = { ALU_OP0_NOP, tgsi_unsupported},
8302 [TGSI_OPCODE_SGT] = { ALU_OP2_SETGT, tgsi_op2},
8303 [TGSI_OPCODE_SIN] = { ALU_OP1_SIN, cayman_trig},
8304 [TGSI_OPCODE_SLE] = { ALU_OP2_SETGE, tgsi_op2_swap},
8305 [TGSI_OPCODE_SNE] = { ALU_OP2_SETNE, tgsi_op2},
8306 [51] = { ALU_OP0_NOP, tgsi_unsupported},
8307 [TGSI_OPCODE_TEX] = { FETCH_OP_SAMPLE, tgsi_tex},
8308 [TGSI_OPCODE_TXD] = { FETCH_OP_SAMPLE_G, tgsi_tex},
8309 [TGSI_OPCODE_TXP] = { FETCH_OP_SAMPLE, tgsi_tex},
8310 [TGSI_OPCODE_UP2H] = { ALU_OP0_NOP, tgsi_unsupported},
8311 [TGSI_OPCODE_UP2US] = { ALU_OP0_NOP, tgsi_unsupported},
8312 [TGSI_OPCODE_UP4B] = { ALU_OP0_NOP, tgsi_unsupported},
8313 [TGSI_OPCODE_UP4UB] = { ALU_OP0_NOP, tgsi_unsupported},
8314 [59] = { ALU_OP0_NOP, tgsi_unsupported},
8315 [60] = { ALU_OP0_NOP, tgsi_unsupported},
8316 [TGSI_OPCODE_ARR] = { ALU_OP0_NOP, tgsi_eg_arl},
8317 [62] = { ALU_OP0_NOP, tgsi_unsupported},
8318 [TGSI_OPCODE_CAL] = { ALU_OP0_NOP, tgsi_unsupported},
8319 [TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
8320 [TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
8321 [TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
8322 [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
8323 [TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8324 [69] = { ALU_OP0_NOP, tgsi_unsupported},
8325 [TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
8326 [TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
8327 [TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8328 [TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
8329 [TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
8330 [TGSI_OPCODE_UIF] = { ALU_OP0_NOP, tgsi_uif},
8331 [76] = { ALU_OP0_NOP, tgsi_unsupported},
8332 [TGSI_OPCODE_ELSE] = { ALU_OP0_NOP, tgsi_else},
8333 [TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
8334 [TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
8335 [TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
8336 [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
8337 [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
8338 [TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
8339 [TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
8340 [TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
8341 [TGSI_OPCODE_TRUNC] = { ALU_OP1_TRUNC, tgsi_op2},
8342 [TGSI_OPCODE_SHL] = { ALU_OP2_LSHL_INT, tgsi_op2},
8343 [88] = { ALU_OP0_NOP, tgsi_unsupported},
8344 [TGSI_OPCODE_AND] = { ALU_OP2_AND_INT, tgsi_op2},
8345 [TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
8346 [TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
8347 [TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
8348 [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
8349 [TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
8350 [TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8351 [TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
8352 [TGSI_OPCODE_EMIT] = { CF_OP_EMIT_VERTEX, tgsi_gs_emit},
8353 [TGSI_OPCODE_ENDPRIM] = { CF_OP_CUT_VERTEX, tgsi_gs_emit},
8354 [TGSI_OPCODE_BGNLOOP] = { ALU_OP0_NOP, tgsi_bgnloop},
8355 [TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8356 [TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
8357 [TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
8358 [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
8359 [TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
8360 [105] = { ALU_OP0_NOP, tgsi_unsupported},
8361 [106] = { ALU_OP0_NOP, tgsi_unsupported},
8362 [TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
8363 [TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
8364 [TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
8365 [TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
8366 [TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
8367 [112] = { ALU_OP0_NOP, tgsi_unsupported},
8368 [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
8369 [114] = { ALU_OP0_NOP, tgsi_unsupported},
8370 [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
8371 [TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
8372 [TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
8373 [118] = { ALU_OP0_NOP, tgsi_unsupported},
8374 [TGSI_OPCODE_F2I] = { ALU_OP1_FLT_TO_INT, tgsi_op2},
8375 [TGSI_OPCODE_IDIV] = { ALU_OP0_NOP, tgsi_idiv},
8376 [TGSI_OPCODE_IMAX] = { ALU_OP2_MAX_INT, tgsi_op2},
8377 [TGSI_OPCODE_IMIN] = { ALU_OP2_MIN_INT, tgsi_op2},
8378 [TGSI_OPCODE_INEG] = { ALU_OP2_SUB_INT, tgsi_ineg},
8379 [TGSI_OPCODE_ISGE] = { ALU_OP2_SETGE_INT, tgsi_op2},
8380 [TGSI_OPCODE_ISHR] = { ALU_OP2_ASHR_INT, tgsi_op2},
8381 [TGSI_OPCODE_ISLT] = { ALU_OP2_SETGT_INT, tgsi_op2_swap},
8382 [TGSI_OPCODE_F2U] = { ALU_OP1_FLT_TO_UINT, tgsi_op2},
8383 [TGSI_OPCODE_U2F] = { ALU_OP1_UINT_TO_FLT, tgsi_op2},
8384 [TGSI_OPCODE_UADD] = { ALU_OP2_ADD_INT, tgsi_op2},
8385 [TGSI_OPCODE_UDIV] = { ALU_OP0_NOP, tgsi_udiv},
8386 [TGSI_OPCODE_UMAD] = { ALU_OP0_NOP, tgsi_umad},
8387 [TGSI_OPCODE_UMAX] = { ALU_OP2_MAX_UINT, tgsi_op2},
8388 [TGSI_OPCODE_UMIN] = { ALU_OP2_MIN_UINT, tgsi_op2},
8389 [TGSI_OPCODE_UMOD] = { ALU_OP0_NOP, tgsi_umod},
8390 [TGSI_OPCODE_UMUL] = { ALU_OP2_MULLO_INT, cayman_mul_int_instr},
8391 [TGSI_OPCODE_USEQ] = { ALU_OP2_SETE_INT, tgsi_op2},
8392 [TGSI_OPCODE_USGE] = { ALU_OP2_SETGE_UINT, tgsi_op2},
8393 [TGSI_OPCODE_USHR] = { ALU_OP2_LSHR_INT, tgsi_op2},
8394 [TGSI_OPCODE_USLT] = { ALU_OP2_SETGT_UINT, tgsi_op2_swap},
8395 [TGSI_OPCODE_USNE] = { ALU_OP2_SETNE_INT, tgsi_op2},
8396 [TGSI_OPCODE_SWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8397 [TGSI_OPCODE_CASE] = { ALU_OP0_NOP, tgsi_unsupported},
8398 [TGSI_OPCODE_DEFAULT] = { ALU_OP0_NOP, tgsi_unsupported},
8399 [TGSI_OPCODE_ENDSWITCH] = { ALU_OP0_NOP, tgsi_unsupported},
8400 [TGSI_OPCODE_SAMPLE] = { 0, tgsi_unsupported},
8401 [TGSI_OPCODE_SAMPLE_I] = { 0, tgsi_unsupported},
8402 [TGSI_OPCODE_SAMPLE_I_MS] = { 0, tgsi_unsupported},
8403 [TGSI_OPCODE_SAMPLE_B] = { 0, tgsi_unsupported},
8404 [TGSI_OPCODE_SAMPLE_C] = { 0, tgsi_unsupported},
8405 [TGSI_OPCODE_SAMPLE_C_LZ] = { 0, tgsi_unsupported},
8406 [TGSI_OPCODE_SAMPLE_D] = { 0, tgsi_unsupported},
8407 [TGSI_OPCODE_SAMPLE_L] = { 0, tgsi_unsupported},
8408 [TGSI_OPCODE_GATHER4] = { 0, tgsi_unsupported},
8409 [TGSI_OPCODE_SVIEWINFO] = { 0, tgsi_unsupported},
8410 [TGSI_OPCODE_SAMPLE_POS] = { 0, tgsi_unsupported},
8411 [TGSI_OPCODE_SAMPLE_INFO] = { 0, tgsi_unsupported},
8412 [TGSI_OPCODE_UARL] = { ALU_OP1_MOVA_INT, tgsi_eg_arl},
8413 [TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
8414 [TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
8415 [TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
8416 [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
8417 [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
8418 [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8419 [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8420 [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
8421 [TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
8422 [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
8423 [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
8424 [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
8425 [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
8426 [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
8427 [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
8428 [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8429 [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8430 [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
8431 [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
8432 [TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
8433 [TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
8434 [TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
8435 [TGSI_OPCODE_IMUL_HI] = { ALU_OP2_MULHI_INT, cayman_mul_int_instr},
8436 [TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
8437 [TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
8438 [TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
8439 [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
8440 [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
8441 [TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
8442 [TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
8443 [TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
8444 [TGSI_OPCODE_LSB] = { ALU_OP1_FFBL_INT, tgsi_op2},
8445 [TGSI_OPCODE_IMSB] = { ALU_OP1_FFBH_INT, tgsi_msb},
8446 [TGSI_OPCODE_UMSB] = { ALU_OP1_FFBH_UINT, tgsi_msb},
8447 [TGSI_OPCODE_INTERP_CENTROID] = { ALU_OP0_NOP, tgsi_interp_egcm},
8448 [TGSI_OPCODE_INTERP_SAMPLE] = { ALU_OP0_NOP, tgsi_interp_egcm},
8449 [TGSI_OPCODE_INTERP_OFFSET] = { ALU_OP0_NOP, tgsi_interp_egcm},
8450 [TGSI_OPCODE_F2D] = { ALU_OP1_FLT32_TO_FLT64, tgsi_op2_64},
8451 [TGSI_OPCODE_D2F] = { ALU_OP1_FLT64_TO_FLT32, tgsi_op2_64_single_dest},
8452 [TGSI_OPCODE_DABS] = { ALU_OP1_MOV, tgsi_op2_64},
8453 [TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
8454 [TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
8455 [TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
8456 [TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
8457 [TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
8458 [TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
8459 [TGSI_OPCODE_DSGE] = { ALU_OP2_SETGE_64, tgsi_op2_64_single_dest},
8460 [TGSI_OPCODE_DSEQ] = { ALU_OP2_SETE_64, tgsi_op2_64_single_dest},
8461 [TGSI_OPCODE_DSNE] = { ALU_OP2_SETNE_64, tgsi_op2_64_single_dest},
8462 [TGSI_OPCODE_DRCP] = { ALU_OP2_RECIP_64, cayman_emit_double_instr},
8463 [TGSI_OPCODE_DSQRT] = { ALU_OP2_SQRT_64, cayman_emit_double_instr},
8464 [TGSI_OPCODE_DMAD] = { ALU_OP3_FMA_64, tgsi_op3_64},
8465 [TGSI_OPCODE_DFRAC] = { ALU_OP1_FRACT_64, tgsi_op2_64},
8466 [TGSI_OPCODE_DLDEXP] = { ALU_OP2_LDEXP_64, tgsi_op2_64},
8467 [TGSI_OPCODE_DFRACEXP] = { ALU_OP1_FREXP_64, tgsi_dfracexp},
8468 [TGSI_OPCODE_D2I] = { ALU_OP1_FLT_TO_INT, egcm_double_to_int},
8469 [TGSI_OPCODE_I2D] = { ALU_OP1_INT_TO_FLT, egcm_int_to_double},
8470 [TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
8471 [TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
8472 [TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
8473 [TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
8474 };